code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration class to store the configuration of a ConvNeXT V2 model."""

    model_type = 'convnextv2'

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act='gelu',
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
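A minimal usage sketch for the config class above; it assumes `transformers` is installed so the imported base classes resolve, and the argument values are illustrative:

```python
# Stage names are derived from the number of depths provided.
config = ConvNextV2Config(depths=[2, 2, 6, 2], hidden_sizes=[40, 80, 160, 320])
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
```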
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursively check both ends of the list at once; return the index of `key`, or -1 if absent."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
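A small, illustrative check of the two-ended recursive search above (the list and keys are arbitrary):

```python
# Matches can be found from either end; missing keys return -1.
print(search([1, 3, 5, 7, 9], 5))  # 2
print(search([1, 3, 5, 7, 9], 9))  # 4
print(search([1, 3, 5, 7, 9], 4))  # -1
```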
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        '--original_config_file',
        default=None,
        type=str,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None`, the number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--scheduler_type',
        default='pndm',
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        '--pipeline_type',
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            '. If `None`, the pipeline will be automatically inferred.'
        ),
    )
    parser.add_argument(
        '--image_size',
        default=None,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--prediction_type',
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    parser.add_argument(
        '--stable_unclip',
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        '--stable_unclip_prior',
        type=str,
        default=None,
        required=False,
        help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
    )
    parser.add_argument(
        '--clip_stats_path',
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
    )
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--vae_path',
        type=str,
        default=None,
        required=False,
        help='Set to a path, hub id to an already converted vae to not convert it again.',
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/fnet-base': 512,
    'google/fnet-large': 512,
}

SPIECE_UNDERLINE = '▁'


class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast FNet tokenizer backed by HuggingFace's *tokenizers* library, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'token_type_ids']
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token='<unk>',
        sep_token='[SEP]',
        pad_token='<pad>',
        cls_token='[CLS]',
        mask_token='[MASK]',
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add [CLS] and [SEP] special tokens around one or two sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type IDs: 0 for the first sequence plus specials, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition of `data` around `pivot`: (less, equal, greater)."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the `index`-th smallest element of `items` (0-based), or None for invalid input."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
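An illustrative call computing order statistics with the selector above (the input list is arbitrary):

```python
# 0-based index len(items) // 2 picks the upper median.
items = [7, 1, 5, 3, 9]
print(quick_select(items, len(items) // 2))  # 5
print(quick_select(items, 0))                # 1 (the minimum)
```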
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes: yields primes one at a time."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite; move its stored factor to the next unmarked multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime; its square is the first composite to mark.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
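A quick sanity check of the incremental sieve on its own, independent of `solution`:

```python
gen = sieve()
print([next(gen) for _ in range(8)])  # [2, 3, 5, 7, 11, 13, 17, 19]
```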
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.')
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir', None)
        self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with a given provider (CPUExecutionProvider by default)."""
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional['ort.SessionOptions'] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs['model_save_dir'] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs['model_save_dir'] = Path(model_cache_path).parent
            kwargs['latest_model_name'] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split('@')) == 2:
            model_id, revision = model_id.split('@')

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    """Synthetic y = a*x + b regression data with a little Gaussian noise."""

    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {'x': self.x[i], 'y': self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}')
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv', data_files=data_files)
    label_list = datasets['train'].unique('label')

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'], examples['sentence2'], truncation=True, max_length=None, padding='max_length'
        )
        if 'label' in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=['sentence1', 'sentence2', 'label'],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
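A tiny, illustrative use of the synthetic regression dataset defined above (the class name follows the restored definition; values are seeded for determinism):

```python
ds = RegressionDataset(a=2, b=3, length=4, seed=0)
sample = ds[0]
print(len(ds))                         # 4
print(sample['y'] - 2 * sample['x'])   # roughly 3, up to the 0.1-scale noise
```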
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize('dataset_size', [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize('input_in_memory_max_size', ['default', 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != 'default':
        monkeypatch.setattr(datasets.config, 'IN_MEMORY_MAX_SIZE', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == 'default':
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}


class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model."""

    model_type = 'git_vision_model'

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act='quick_gelu',
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('model_type') == 'git':
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    """Configuration for a GIT (GenerativeImage2Text) model."""

    model_type = 'git'

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the GitVisionConfig with default values.')

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary-search a decreasing row for the index of its first negative number."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with find_negative_index, shrinking the search bound row by row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every value."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives, breaking out of each row at its first negative value."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        'count_negatives_binary_search',  # took 0.7727 seconds
        'count_negatives_brute_force_with_break',  # took 4.6505 seconds
        'count_negatives_brute_force',  # took 12.8160 seconds
    ):
        time = timeit(f'{func}(grid=grid)', setup=setup, number=500)
        print(f'{func}() took {time:0.4f} seconds')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
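An illustrative comparison of the three counters above on the first fixed test grid; all must agree:

```python
small = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
print(count_negatives_binary_search(small))           # 8
print(count_negatives_brute_force(small))             # 8
print(count_negatives_brute_force_with_break(small))  # 8
```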
import argparse
import os
from io import BytesIO
from pathlib import Path

import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm


def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f'{class_data_dir}/images', exist_ok=True)
    if len(list(Path(f'{class_data_dir}/images').iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='https://knn.laion.ai/knn-service',
                indice_name='laion_400m',
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images', total=num_class_images)

    with open(f'{class_data_dir}/caption.txt', 'w') as f1, open(f'{class_data_dir}/urls.txt', 'w') as f2, open(
        f'{class_data_dir}/images.txt', 'w'
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f'{class_data_dir}/images/{total}.jpg', 'wb') as f:
                        f.write(img.content)
                    f1.write(images['caption'] + '\n')
                    f2.write(images['url'] + '\n')
                    f3.write(f'{class_data_dir}/images/{total}.jpg' + '\n')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {'configuration_mmbt': ['MMBTConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
# Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.
import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file


# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ('time_embed.0.weight', 'time_embedding.linear_1.weight'),
    ('time_embed.0.bias', 'time_embedding.linear_1.bias'),
    ('time_embed.2.weight', 'time_embedding.linear_2.weight'),
    ('time_embed.2.bias', 'time_embedding.linear_2.bias'),
    ('input_blocks.0.0.weight', 'conv_in.weight'),
    ('input_blocks.0.0.bias', 'conv_in.bias'),
    ('out.0.weight', 'conv_norm_out.weight'),
    ('out.0.bias', 'conv_norm_out.bias'),
    ('out.2.weight', 'conv_out.weight'),
    ('out.2.bias', 'conv_out.bias'),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ('in_layers.0', 'norm1'),
    ('in_layers.2', 'conv1'),
    ('out_layers.0', 'norm2'),
    ('out_layers.3', 'conv2'),
    ('emb_layers.1', 'time_emb_proj'),
    ('skip_connection', 'conv_shortcut'),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f'down_blocks.{i}.resnets.{j}.'
        sd_down_res_prefix = f'input_blocks.{3*i + j + 1}.0.'
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f'down_blocks.{i}.attentions.{j}.'
            sd_down_atn_prefix = f'input_blocks.{3*i + j + 1}.1.'
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f'up_blocks.{i}.resnets.{j}.'
        sd_up_res_prefix = f'output_blocks.{3*i + j}.0.'
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f'up_blocks.{i}.attentions.{j}.'
            sd_up_atn_prefix = f'output_blocks.{3*i + j}.1.'
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f'down_blocks.{i}.downsamplers.0.conv.'
        sd_downsample_prefix = f'input_blocks.{3*(i+1)}.0.op.'
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f'up_blocks.{i}.upsamplers.0.'
        sd_upsample_prefix = f'output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = 'mid_block.attentions.0.'
sd_mid_atn_prefix = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f'mid_block.resnets.{j}.'
    sd_mid_res_prefix = f'middle_block.{2*j}.'
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if 'resnets' in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ('nin_shortcut', 'conv_shortcut'),
    ('norm_out', 'conv_norm_out'),
    ('mid.attn_1.', 'mid_block.attentions.0.'),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f'encoder.down_blocks.{i}.resnets.{j}.'
        sd_down_prefix = f'encoder.down.{i}.block.{j}.'
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f'down_blocks.{i}.downsamplers.0.'
        sd_downsample_prefix = f'down.{i}.downsample.'
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f'up_blocks.{i}.upsamplers.0.'
        sd_upsample_prefix = f'up.{3-i}.upsample.'
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f'decoder.up_blocks.{i}.resnets.{j}.'
        sd_up_prefix = f'decoder.up.{3-i}.block.{j}.'
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f'mid_block.resnets.{i}.'
    sd_mid_res_prefix = f'mid.block_{i+1}.'
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ('norm.', 'group_norm.'),
    ('q.', 'query.'),
    ('k.', 'key.'),
    ('v.', 'value.'),
    ('proj_out.', 'proj_attn.'),
]


def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if 'attentions' in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f'mid.attn_1.{weight_name}.weight' in k:
                print(f'Reshaping {k} for SD format')
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ('resblocks.', 'text_model.encoder.layers.'),
    ('ln_1', 'layer_norm1'),
    ('ln_2', 'layer_norm2'),
    ('.c_fc.', '.fc1.'),
    ('.c_proj.', '.fc2.'),
    ('.attn', '.self_attn'),
    ('ln_final.', 'transformer.text_model.final_layer_norm.'),
    ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
    ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('|'.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'q': 0, 'k': 1, 'v': 2}


def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight')
            or k.endswith('.self_attn.k_proj.weight')
            or k.endswith('.self_attn.v_proj.weight')
        ):
            k_pre = k[: -len('.q_proj.weight')]
            k_code = k[-len('q_proj.weight')]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith('.self_attn.q_proj.bias')
            or k.endswith('.self_attn.k_proj.bias')
            or k.endswith('.self_attn.v_proj.bias')
        ):
            k_pre = k[: -len('.q_proj.bias')]
            k_code = k[-len('q_proj.bias')]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_weight'] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing')
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + '.in_proj_bias'] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
    )

    args = parser.parse_args()

    assert args.model_path is not None, 'Must provide a model path!'

    assert args.checkpoint_path is not None, 'Must provide a checkpoint path!'

    # Path for safetensors
    unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    text_enc_path = osp.join(args.model_path, 'text_encoder', 'model.safetensors')

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device='cpu')
    else:
        unet_path = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        unet_state_dict = torch.load(unet_path, map_location='cpu')

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device='cpu')
    else:
        vae_path = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        vae_state_dict = torch.load(vae_path, map_location='cpu')

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device='cpu')
    else:
        text_enc_path = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        text_enc_dict = torch.load(text_enc_path, map_location='cpu')

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {'transformer.' + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == 'down':
            return (4, 32, 16, 16)
        elif self.block_type == 'mid':
            return (4, 32, 32, 32)
        elif self.block_type == 'up':
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {'hidden_states': hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input['temb'] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input['res_hidden_states_tuple'] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input['encoder_hidden_states'] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input['skip_sample'] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'in_channels': 32,
            'out_channels': 32,
            'temb_channels': 128,
        }
        if self.block_type == 'up':
            init_dict['prev_output_channel'] = 32

        if self.block_type == 'mid':
            init_dict.pop('out_channels')

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == 'mps', 'Training is not supported in mps')
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
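A hypothetical call to the helper above; the repo id and file name are placeholders, and the exact URL comes from whatever `huggingface_hub` version is installed:

```python
url = hf_hub_url('user/my-dataset', 'data/train-00000.parquet', revision='main')
print(url)  # a huggingface.co/datasets/.../resolve/main/... URL
```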
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation

    # print table header
    print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep=' | ')
    print('-' * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ('push(' + x + ')').ljust(12), ','.join(stack), sep=' | ')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + b + ')').ljust(12), ','.join(stack), sep=' | ')

            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + a + ')').ljust(12), ','.join(stack), sep=' | ')

            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ('push(' + a + x + b + ')').ljust(12), ','.join(stack), sep=' | ')

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
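A non-interactive way to exercise `solve`, bypassing `input()` (the expression is chosen arbitrarily):

```python
# (5 + 3) * 2 in postfix notation is '5 3 + 2 *'.
print(solve('5 3 + 2 *'.split(' ')))  # prints the trace table, then 16
```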
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence: s(1) = 2, s(n) = s(n-1)^2 - s(n-1) + 1."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
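The first terms of Sylvester's sequence, as a quick check of the recurrence above:

```python
print([sylvester(n) for n in range(1, 5)])  # [2, 3, 7, 43]
```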
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {'height': 20, 'width': 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}

    def prepare_image_processor_dict(self):
        return {'do_normalize': self.do_normalize, 'do_convert_rgb': self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.',
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
        self.assertTrue(hasattr(image_processor, 'do_convert_rgb'))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors='pt', max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors='pt', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors='pt', max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors='pt', max_patches=max_patch
                ).flattened_patches

            dummy_text = 'Hello'
            encoded_images = image_processor(
                image_inputs[0], return_tensors='pt', max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors='pt', max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors='pt', max_patches=max_patch
            ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processor(
_lowerCAmelCase , return_tensors='pt' , max_patches=_lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def lowerCAmelCase_ ( self : str ):
# Initialize image_processor
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processor(
_lowerCAmelCase , return_tensors='pt' , max_patches=_lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = PixaStructImageProcessingTester(self , num_channels=4 )
SCREAMING_SNAKE_CASE_ = 3
@property
def lowerCAmelCase_ ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowerCAmelCase , 'do_convert_rgb' ) )
def lowerCAmelCase_ ( self : List[str] ):
# Initialize image_processor
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processor(
_lowerCAmelCase , return_tensors='pt' , max_patches=_lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) | 31 |
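# Hedged sketch of the `expected_hidden_dim` arithmetic asserted above: a
# flattened patch carries patch_h * patch_w * channels pixel values plus two
# extra slots for the patch's row and column index.
patch_size = {'height': 16, 'width': 16}
num_channels = 3
assert patch_size['height'] * patch_size['width'] * num_channels + 2 == 770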
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCamelCase__ : int = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
) | 31 | 1 |
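# Hedged sketch of the lazy pattern above (requires transformers installed):
# importing the module is cheap; the first attribute lookup does the real work.
import importlib

mt5_module = importlib.import_module('transformers.models.mt5')
_ = mt5_module.MT5Config  # triggers the deferred import via _LazyModule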
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: returns all primes <= n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
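# Hedged sanity check (added): the segmented sieve should agree with direct
# trial division on a small range.
def _is_prime(k: int) -> bool:
    return k > 1 and all(k % d != 0 for d in range(2, int(math.sqrt(k)) + 1))

assert sieve(50) == [p for p in range(2, 51) if _is_prime(p)]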
print(sieve(10**6)) | 31 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch
def lowerCAmelCase_ ( self : int ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : Tuple ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import AutoModel\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() ) | 31 | 1 |
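# Hedged usage sketch of the switch exercised above: TRANSFORMERS_OFFLINE=1
# must be set before `transformers` is imported, after which from_pretrained
# resolves only against the local cache (so this works offline only if the
# files were downloaded earlier).
import os

os.environ['TRANSFORMERS_OFFLINE'] = '1'
from transformers import AutoModel  # import after setting the variable

model = AutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')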
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str]=13 , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Optional[int]=99 , _lowerCAmelCase : int=64 , _lowerCAmelCase : str=5 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Tuple=37 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : Optional[Any]=0.1 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : str=512 , _lowerCAmelCase : List[str]=16 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : str=None , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_input_mask
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = num_choices
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = vocab_size - 1
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCAmelCase_ ( self : Tuple ):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = True
return config, input_ids, input_mask, token_labels
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ = GPTNeoXModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = GPTNeoXModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Any ):
SCREAMING_SNAKE_CASE_ = GPTNeoXForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = GPTNeoXForQuestionAnswering(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = GPTNeoXForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = GPTNeoXForTokenClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = GPTNeoXForCausalLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE_ = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = output_from_no_past['hidden_states'][0]
SCREAMING_SNAKE_CASE_ = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase_ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowercase_ = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = GPTNeoXModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=64 , num_attention_heads=8 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
# This regression test was failing with PyTorch < 1.3
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_decoder()
SCREAMING_SNAKE_CASE_ = None
self.model_tester.create_and_check_model_as_decoder(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def lowerCAmelCase_ ( self : str ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = ids_tensor([1, 10] , config.vocab_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ = GPTNeoXModel(_lowerCAmelCase )
original_model.to(_lowerCAmelCase )
original_model.eval()
SCREAMING_SNAKE_CASE_ = original_model(_lowerCAmelCase ).last_hidden_state
SCREAMING_SNAKE_CASE_ = original_model(_lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE_ = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE_ = GPTNeoXModel(_lowerCAmelCase )
scaled_model.to(_lowerCAmelCase )
scaled_model.eval()
SCREAMING_SNAKE_CASE_ = scaled_model(_lowerCAmelCase ).last_hidden_state
SCREAMING_SNAKE_CASE_ = scaled_model(_lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
SCREAMING_SNAKE_CASE_ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = tokenizer('My favorite food is' , return_tensors='pt' ).to(_lowerCAmelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
SCREAMING_SNAKE_CASE_ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
SCREAMING_SNAKE_CASE_ = model.generate(**_lowerCAmelCase , do_sample=_lowerCAmelCase , max_new_tokens=20 )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_lowerCAmelCase )[0]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) | 31 |
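# Hedged usage sketch of the rope-scaling dict parameterized above (same
# {"type", "factor"} format the test builds; requires transformers).
from transformers import GPTNeoXConfig

linear_cfg = GPTNeoXConfig(rope_scaling={'type': 'linear', 'factor': 10.0})
dynamic_cfg = GPTNeoXConfig(rope_scaling={'type': 'dynamic', 'factor': 10.0})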
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = 'M-CLIP'

    def __init__(self, transformerDimSize=1_024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims )

    def forward(self, input_ids, attention_mask):
        # Mask-weighted mean pooling over the token states, then a linear projection.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs | 31 | 1 |
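# Hedged usage sketch (added); the checkpoint id below is an assumption (any
# Hub repo shipping this M-CLIP layout should work) and it downloads weights.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('M-CLIP/XLM-Roberta-Large-Vit-B-32')
model = MultilingualCLIP.from_pretrained('M-CLIP/XLM-Roberta-Large-Vit-B-32')
batch = tokenizer(["une photo d'un chat"], padding=True, return_tensors='pt')
text_embs, token_states = model(batch['input_ids'], batch['attention_mask'])
print(text_embs.shape)  # (batch_size, numDims)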
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag marking translated arrow-key codes
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs."""
    if os.name == "nt":
        import msvcrt

        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int']))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP['esc'])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"] | 31 |
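# Hedged interactive sketch (added) for the helpers above: read one key press
# and report whether it was a translated arrow key. Run it in a real terminal.
if __name__ == '__main__':
    pressed = get_character()
    if isinstance(pressed, str) and ord(pressed) & ARROW_KEY_FLAG:
        print('arrow key:', ord(pressed) - ARROW_KEY_FLAG)
    else:
        print('key:', pressed)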
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase )
return image
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def lowerCAmelCase_ ( self : Tuple ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def lowerCAmelCase_ ( self : Optional[int] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(_lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : List[Any] ):
def extract(*_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str ):
SCREAMING_SNAKE_CASE_ = torch.ones([0] )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int ):
self.pixel_values.to(_lowerCAmelCase )
return self
return Out()
return extract
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
# put models in fp16
SCREAMING_SNAKE_CASE_ = unet.half()
SCREAMING_SNAKE_CASE_ = vae.half()
SCREAMING_SNAKE_CASE_ = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE_ = init_image.resize((760, 504) )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
SCREAMING_SNAKE_CASE_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE_ = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2 | 31 | 1 |
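# Hedged note on the img2img knobs above: strength controls how much of the
# schedule actually runs, roughly int(num_inference_steps * strength) steps
# from a noised copy of the init image (illustrative arithmetic only).
assert int(50 * 0.75) == 37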
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = 'gptsan-japanese'
    keys_to_ignore_at_inference = [
        'past_key_values',
    ]
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        vocab_size=36_000,
        max_position_embeddings=1_280,
        d_model=1_024,
        d_ff=8_192,
        d_ext=4_096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1E-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype='float32',
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35_998,
        pad_token_id=35_995,
        eos_token_id=35_999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs, ) | 31 |
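# Hedged sanity sketch (added; requires transformers): num_layers is derived
# from the two layer counts, and attribute_map aliases the generic names.
config = GPTSanJapaneseConfig(num_switch_layers=10, num_ext_layers=2)
assert config.num_layers == 12
assert config.hidden_size == config.d_model  # via attribute_map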
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    model_type = 'longformer'

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = 'gelu',
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('global_attention_mask', dynamic_axis),
            ] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs['pooler_output'] = {0: 'batch'}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ):
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs['global_attention_mask'] = torch.zeros_like(inputs['input_ids'])
        # make every second token global
        inputs['global_attention_mask'][:, ::2] = 1

        return inputs | 31 | 1 |
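# Hedged sketch (added) of the global-attention layout produced above: a zero
# mask with every second position set to 1 (pure tensor arithmetic, no export).
import torch

ids = torch.ones(2, 8, dtype=torch.long)
gmask = torch.zeros_like(ids)
gmask[:, ::2] = 1
assert gmask.sum().item() == 8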
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : List[str] ):
warnings.warn(
'`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
'instead.' , _lowerCAmelCase , )
super().__init__(args=_lowerCAmelCase , **_lowerCAmelCase ) | 31 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : int ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) | 31 | 1 |
def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer a**b. Because int(b / 2) truncates toward zero,
    the recursion also terminates for negative b, returning a**abs(b)."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
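# Hedged sanity checks (added) for the two helpers above.
assert power(2, 10) == 1024
assert power(2, -3) == 0.125
assert power(-2, -3) == -0.125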
if __name__ == "__main__":
print(power(-2, -3)) | 31 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class Swinv2Config(PretrainedConfig):
    model_type = 'swinv2'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0) | 31 | 1 |
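# Hedged arithmetic check (added) for the derived channel width above: the
# embedding dim doubles at each of the len(depths) - 1 downsampling stages.
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768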
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 31 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
lowerCamelCase__ : Dict = random.Random()
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Tuple=1.0 , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Dict=None ) -> Tuple:
if rng is None:
SCREAMING_SNAKE_CASE_ = global_rng
SCREAMING_SNAKE_CASE_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=7 , _lowerCAmelCase : Union[str, Any]=400 , _lowerCAmelCase : Tuple=2_000 , _lowerCAmelCase : str=1 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Optional[Any]=16_000 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=80 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : List[str]=64 , _lowerCAmelCase : List[Any]="hann_window" , _lowerCAmelCase : Any=80 , _lowerCAmelCase : List[Any]=7_600 , _lowerCAmelCase : List[Any]=1E-10 , _lowerCAmelCase : Optional[Any]=True , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = min_seq_length
SCREAMING_SNAKE_CASE_ = max_seq_length
SCREAMING_SNAKE_CASE_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE_ = feature_size
SCREAMING_SNAKE_CASE_ = padding_value
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = num_mel_bins
SCREAMING_SNAKE_CASE_ = hop_length
SCREAMING_SNAKE_CASE_ = win_length
SCREAMING_SNAKE_CASE_ = win_function
SCREAMING_SNAKE_CASE_ = fmin
SCREAMING_SNAKE_CASE_ = fmax
SCREAMING_SNAKE_CASE_ = mel_floor
SCREAMING_SNAKE_CASE_ = return_attention_mask
def lowerCAmelCase_ ( self : Union[str, Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : str=False ):
def _flatten(_lowerCAmelCase : Dict ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Optional[int]=False ):
if equal_length:
SCREAMING_SNAKE_CASE_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = SpeechTaFeatureExtractor
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractionTester(self )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : int ):
self.assertTrue(np.all(np.mean(_lowerCAmelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase_ ( self : List[Any] ):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE_ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ = [None, 1_600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
            self.assertTrue(input_values[1][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = range(800 , 1_400 , 200 )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ = [None, 1_600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_000 , padding='max_length' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=2_000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowerCAmelCase_ ( self : Tuple ):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE_ = feature_extractor(audio_target=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE_ = np.asarray(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = min(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_lowerCAmelCase , padding='max_length' , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Tuple ):
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE_ = ds.sort('id' ).select(range(_lowerCAmelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowerCAmelCase_ ( self : Any ):
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , _lowerCAmelCase , atol=1E-6 ) )
def lowerCAmelCase_ ( self : Optional[int] ):
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = feature_extractor(audio_target=_lowerCAmelCase , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _lowerCAmelCase , atol=1E-4 ) ) | 31 | 1 |
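# Note: the integration checks above exercise both extractor modes — raw
# 16 kHz waveforms (zero-mean / unit-variance input_values) and 80-bin
# log-mel `audio_target` features used as speech-synthesis targets.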
# Pigeonhole sorting algorithm
def UpperCAmelCase_ ( __UpperCAmelCase : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = min(__UpperCAmelCase ) # min() finds the minimum value
SCREAMING_SNAKE_CASE_ = max(__UpperCAmelCase ) # max() finds the maximum value
SCREAMING_SNAKE_CASE_ = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
SCREAMING_SNAKE_CASE_ = [0] * size
# Populate the pigeonholes.
for x in a:
        assert isinstance(x , int ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
SCREAMING_SNAKE_CASE_ = 0
for count in range(__UpperCAmelCase ):
while holes[count] > 0:
holes[count] -= 1
SCREAMING_SNAKE_CASE_ = count + min_val
i += 1
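# Complexity note: one counting pass plus one write-back pass gives
# O(n + size) time and O(size) extra space, so pigeonhole sort suits dense
# integer keys with a small max-min spread.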
def UpperCAmelCase_ ( ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(__UpperCAmelCase )
    print('Sorted order is:' , ' '.join(str(x) for x in __UpperCAmelCase ) )
if __name__ == "__main__":
main() | 31 |
from __future__ import annotations
from typing import TypedDict
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 42
lowercase_ = 42
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> list[str]:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('The parameter s type must be str.' )
return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )]
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> BWTTransformDict:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('The parameter s type must be str.' )
if not s:
raise ValueError('The parameter s must not be empty.' )
SCREAMING_SNAKE_CASE_ = all_rotations(__UpperCAmelCase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
SCREAMING_SNAKE_CASE_ = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__UpperCAmelCase ),
}
return response
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : int ) -> str:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('The parameter bwt_string type must be str.' )
if not bwt_string:
raise ValueError('The parameter bwt_string must not be empty.' )
try:
SCREAMING_SNAKE_CASE_ = int(__UpperCAmelCase )
except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or'
            ' castable to int.' )
if idx_original_string < 0:
raise ValueError('The parameter idx_original_string must not be lower than 0.' )
if idx_original_string >= len(__UpperCAmelCase ):
raise ValueError(
'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
SCREAMING_SNAKE_CASE_ = [''] * len(__UpperCAmelCase )
for _ in range(len(__UpperCAmelCase ) ):
for i in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = 'Provide a string that I will generate its BWT transform: '
lowerCamelCase__ : List[str] = input(entry_msg).strip()
lowerCamelCase__ : int = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
lowerCamelCase__ : Dict = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
) | 31 | 1 |
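# Round-trip sketch (hypothetical, assuming the original helper names
# `bwt_transform` / `reverse_bwt`): for s == "banana" the sorted rotations
# yield bwt_string "nnbaaa" with idx_original_string 3, and
# reverse_bwt("nnbaaa", 3) reconstructs "banana".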
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42 # [batch_size x 3]
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
def lowerCAmelCase_ ( self : Dict ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def lowerCAmelCase_ ( self : Optional[Any] ):
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def lowerCAmelCase_ ( self : str ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE_ = torch.stack(
[
pixel_indices % self.width,
torch.div(_lowerCAmelCase , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ = self.shape
SCREAMING_SNAKE_CASE_ = int(np.prod(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = self.get_image_coords()
SCREAMING_SNAKE_CASE_ = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE_ = self.get_camera_rays(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = rays.view(_lowerCAmelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : torch.Tensor ):
SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE_ = coords.view(_lowerCAmelCase , -1 , 2 )
SCREAMING_SNAKE_CASE_ = self.resolution()
SCREAMING_SNAKE_CASE_ = self.fov()
SCREAMING_SNAKE_CASE_ = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE_ = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE_ = fracs.view(_lowerCAmelCase , -1 , 2 )
SCREAMING_SNAKE_CASE_ = (
self.z.view(_lowerCAmelCase , 1 , 3 )
+ self.x.view(_lowerCAmelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_lowerCAmelCase , 1 , 3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE_ = directions / directions.norm(dim=-1 , keepdim=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.stack(
[
torch.broadcast_to(self.origin.view(_lowerCAmelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_lowerCAmelCase , *_lowerCAmelCase , 2 , 3 )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_lowerCAmelCase , height=_lowerCAmelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> DifferentiableProjectiveCamera:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
SCREAMING_SNAKE_CASE_ = np.array([np.sin(__UpperCAmelCase ), np.cos(__UpperCAmelCase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE_ = -z * 4
SCREAMING_SNAKE_CASE_ = np.array([np.cos(__UpperCAmelCase ), -np.sin(__UpperCAmelCase ), 0.0] )
SCREAMING_SNAKE_CASE_ = np.cross(__UpperCAmelCase , __UpperCAmelCase )
origins.append(__UpperCAmelCase )
xs.append(__UpperCAmelCase )
ys.append(__UpperCAmelCase )
zs.append(__UpperCAmelCase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__UpperCAmelCase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__UpperCAmelCase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__UpperCAmelCase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__UpperCAmelCase , axis=0 ) ).float() , width=__UpperCAmelCase , height=__UpperCAmelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__UpperCAmelCase )) , ) | 31 |
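# Usage sketch (hypothetical; assumes the factory above keeps its upstream
# name `create_pan_cameras`): it yields 20 poses circling the origin, and
# the ray property reshapes to [batch, n_views * height * width, 2, 3] with
# origins and unit directions stacked on the penultimate axis — for a
# 64x64 render that is [1, 20 * 64 * 64, 2, 3].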
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str ):
SCREAMING_SNAKE_CASE_ = {}
def lowerCAmelCase_ ( self : List[str] ):
print(self.vertex )
for i in self.vertex:
print(_lowerCAmelCase , ' -> ' , ' -> '.join([str(_lowerCAmelCase ) for j in self.vertex[i]] ) )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
        # check if the source vertex is already present
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_lowerCAmelCase )
else:
# else make a new vertex
SCREAMING_SNAKE_CASE_ = [to_vertex]
def lowerCAmelCase_ ( self : Optional[Any] ):
# visited array for storing already visited nodes
SCREAMING_SNAKE_CASE_ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : list ):
# mark start vertex as visited
SCREAMING_SNAKE_CASE_ = True
print(_lowerCAmelCase , end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase )
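# Note: the shared `visited` list guarantees each vertex is marked and
# printed exactly once, so repeated edges and self-loops (e.g. the 3 -> 3
# edge in the demo below) cannot cause infinite recursion.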
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3 | 31 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = IFPipeline
lowercase_ = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
lowercase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {"latents"}
def lowerCAmelCase_ ( self : str ):
return self._get_dummy_components()
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str=0 ):
if str(_lowerCAmelCase ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(_lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase_ ( self : str ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def lowerCAmelCase_ ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase_ ( self : Dict ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase_ ( self : List[Any] ):
self._test_save_load_local()
def lowerCAmelCase_ ( self : Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCAmelCase_ ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Any ):
# if
SCREAMING_SNAKE_CASE_ = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE_ = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE_ = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE_ = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE_ = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] ):
# pipeline 1
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe_a(
prompt_embeds=_lowerCAmelCase , negative_prompt_embeds=_lowerCAmelCase , num_inference_steps=2 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = pipe_a(
prompt_embeds=_lowerCAmelCase , negative_prompt_embeds=_lowerCAmelCase , image=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ):
# pipeline 1
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe_a(
prompt_embeds=_lowerCAmelCase , negative_prompt_embeds=_lowerCAmelCase , image=_lowerCAmelCase , num_inference_steps=2 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = pipe_a(
prompt_embeds=_lowerCAmelCase , negative_prompt_embeds=_lowerCAmelCase , image=_lowerCAmelCase , original_image=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
# pipeline 1
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe_a(
prompt_embeds=_lowerCAmelCase , negative_prompt_embeds=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , num_inference_steps=2 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = pipe_a(
prompt_embeds=_lowerCAmelCase , negative_prompt_embeds=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , original_image=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
def UpperCAmelCase_ ( ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats() | 31 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "funnel"
lowercase_ = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : int , _lowerCAmelCase : Optional[int]=30_522 , _lowerCAmelCase : List[str]=[4, 4, 4] , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : int=768 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[Any]=3_072 , _lowerCAmelCase : List[str]="gelu_new" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : str=1E-9 , _lowerCAmelCase : Any="mean" , _lowerCAmelCase : Union[str, Any]="relative_shift" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=True , **_lowerCAmelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = block_sizes
SCREAMING_SNAKE_CASE_ = [1] * len(_lowerCAmelCase ) if block_repeats is None else block_repeats
assert len(_lowerCAmelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
SCREAMING_SNAKE_CASE_ = num_decoder_layers
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = n_head
SCREAMING_SNAKE_CASE_ = d_head
SCREAMING_SNAKE_CASE_ = d_inner
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = initializer_std
SCREAMING_SNAKE_CASE_ = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
SCREAMING_SNAKE_CASE_ = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
SCREAMING_SNAKE_CASE_ = attention_type
SCREAMING_SNAKE_CASE_ = separate_cls
SCREAMING_SNAKE_CASE_ = truncate_seq
SCREAMING_SNAKE_CASE_ = pool_q_only
super().__init__(**_lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[Any] ):
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return len(self.block_sizes )
@num_blocks.setter
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any] ):
raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' ) | 31 | 1 |
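# Behaviour sketch grounded in the properties above: with the default
# block_sizes=[4, 4, 4], num_hidden_layers resolves to sum(block_sizes) == 12
# and num_blocks to 3; assigning either property directly raises
# NotImplementedError — reconfigure via `block_sizes` instead.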
import os
from collections.abc import Iterator
def UpperCAmelCase_ ( __UpperCAmelCase : str = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__UpperCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(__UpperCAmelCase , __UpperCAmelCase ).lstrip('./' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Optional[Any]:
return f"{i * ' '}*" if i else "\n##"
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : str ) -> str:
SCREAMING_SNAKE_CASE_ = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__UpperCAmelCase ) or old_parts[i] != new_part) and new_part:
print(f"{md_prefix(__UpperCAmelCase )} {new_part.replace('_' , ' ' ).title()}" )
return new_path
def UpperCAmelCase_ ( __UpperCAmelCase : str = "." ) -> None:
SCREAMING_SNAKE_CASE_ = ''
for filepath in sorted(good_file_paths(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = os.path.split(__UpperCAmelCase )
if filepath != old_path:
SCREAMING_SNAKE_CASE_ = print_path(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = (filepath.count(os.sep ) + 1) if filepath else 0
SCREAMING_SNAKE_CASE_ = f"{filepath}/{filename}".replace(' ' , '%20' )
SCREAMING_SNAKE_CASE_ = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(f"{md_prefix(__UpperCAmelCase )} [{filename}]({url})" )
if __name__ == "__main__":
print_directory_md('.') | 31 |
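# Output sketch (illustrative): run from a repository root, the script
# prints a nested markdown index such as "## Graphs" followed by entries
# like "* [Dfs](graphs/dfs.py)", with list indentation tracking directory
# depth via md_prefix above.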
from __future__ import annotations
from collections.abc import Iterator
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = value
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , _lowerCAmelCase : Node ):
SCREAMING_SNAKE_CASE_ = tree
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Node | None ):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Dict ):
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 1 |
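# Worked example (hypothetical, assuming the upstream names `Node` and
# `BinaryTreeNodeSum`): summing a three-node tree.
# >>> tree = Node(10)
# >>> tree.left, tree.right = Node(5), Node(-3)
# >>> sum(BinaryTreeNodeSum(tree))
# 12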
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def UpperCAmelCase_ ( __UpperCAmelCase : List[str]=None ) -> Any:
if subparsers is not None:
SCREAMING_SNAKE_CASE_ = subparsers.add_parser('test' )
else:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=__UpperCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=__UpperCAmelCase )
return parser
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
SCREAMING_SNAKE_CASE_ = script_name
else:
SCREAMING_SNAKE_CASE_ = f"--config_file={args.config_file} {script_name}"
SCREAMING_SNAKE_CASE_ = ['accelerate-launch'] + test_args.split()
SCREAMING_SNAKE_CASE_ = execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def UpperCAmelCase_ ( ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = test_command_parser()
SCREAMING_SNAKE_CASE_ = parser.parse_args()
test_command(__UpperCAmelCase )
if __name__ == "__main__":
main() | 31 |
def UpperCAmelCase_ ( __UpperCAmelCase : list , __UpperCAmelCase : int , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = 0 ) -> int:
SCREAMING_SNAKE_CASE_ = right or len(__UpperCAmelCase ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(__UpperCAmelCase , __UpperCAmelCase , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 1 |
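# Behaviour sketch: this is a two-ended recursive *linear* search, not a
# binary search — e.g. search([1, 2, 4, 8], 4) returns index 2 and a
# missing key returns -1, in O(n) time and O(n) recursion depth.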
from maths.prime_check import is_prime
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> int:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = f"Input value of [number={number}] must be an integer"
raise TypeError(__UpperCAmelCase )
if is_prime(__UpperCAmelCase ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 |
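# Example sketch (assuming the upstream name `twin_prime`): twin_prime(3)
# returns 5 (both 3 and 5 are prime), while twin_prime(4) returns -1
# because 4 itself is not prime.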
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ : List[str] = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ : Optional[Any] = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
lowerCamelCase__ : List[Any] = '▁'
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "token_type_ids"]
lowercase_ = FNetTokenizer
def __init__( self : List[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[Any]="<unk>" , _lowerCAmelCase : Optional[Any]="[SEP]" , _lowerCAmelCase : Optional[Any]="<pad>" , _lowerCAmelCase : Optional[int]="[CLS]" , _lowerCAmelCase : Optional[Any]="[MASK]" , **_lowerCAmelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE_ = (
AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase , normalized=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else mask_token
)
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,) | 31 | 1 |
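# Layout note grounded in the methods above: a single sequence is encoded
# as [CLS] A [SEP] with all-zero token_type_ids, and a pair as
# [CLS] A [SEP] B [SEP] with zeros over the first segment and ones over the
# second — matching `create_token_type_ids_from_sequences`.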
import math
import qiskit
def UpperCAmelCase_ ( __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 1 ) -> qiskit.result.counts.Counts:
if (
        isinstance(__UpperCAmelCase , str )
        or isinstance(__UpperCAmelCase , str )
        or isinstance(__UpperCAmelCase , str )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(__UpperCAmelCase ) != input_a)
or (math.floor(__UpperCAmelCase ) != input_a)
or (math.floor(__UpperCAmelCase ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('inputs must be less or equal to 2.' )
# build registers
SCREAMING_SNAKE_CASE_ = qiskit.QuantumRegister(4 , 'qr' )
SCREAMING_SNAKE_CASE_ = qiskit.ClassicalRegister(2 , 'cr' )
# list the entries
SCREAMING_SNAKE_CASE_ = [input_a, input_a, carry_in]
SCREAMING_SNAKE_CASE_ = qiskit.QuantumCircuit(__UpperCAmelCase , __UpperCAmelCase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__UpperCAmelCase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__UpperCAmelCase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__UpperCAmelCase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , __UpperCAmelCase ) # measure the last two qbits
SCREAMING_SNAKE_CASE_ = qiskit.Aer.get_backend('aer_simulator' )
SCREAMING_SNAKE_CASE_ = qiskit.execute(__UpperCAmelCase , __UpperCAmelCase , shots=10_00 )
return job.result().get_counts(__UpperCAmelCase )
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''') | 31 |
from __future__ import annotations
from collections.abc import Generator
def UpperCAmelCase_ ( ) -> Generator[int, None, None]:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 2
while True:
SCREAMING_SNAKE_CASE_ = factor_map.pop(__UpperCAmelCase , __UpperCAmelCase )
if factor:
SCREAMING_SNAKE_CASE_ = factor + prime
while x in factor_map:
x += factor
SCREAMING_SNAKE_CASE_ = factor
else:
SCREAMING_SNAKE_CASE_ = prime
yield prime
prime += 1
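# Note: this is an incremental (unbounded) Sieve of Eratosthenes — each
# prime parks its next composite in factor_map, so memory grows with the
# number of primes yielded rather than with a fixed search limit.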
def UpperCAmelCase_ ( __UpperCAmelCase : float = 1E10 ) -> int:
SCREAMING_SNAKE_CASE_ = sieve()
SCREAMING_SNAKE_CASE_ = 1
while True:
SCREAMING_SNAKE_CASE_ = next(__UpperCAmelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(__UpperCAmelCase )
n += 2
if __name__ == "__main__":
print(solution()) | 31 | 1 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int ) -> Dict:
SCREAMING_SNAKE_CASE_ = 0
if start < end:
SCREAMING_SNAKE_CASE_ = randint(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = a[end]
SCREAMING_SNAKE_CASE_ = a[pivot]
SCREAMING_SNAKE_CASE_ = temp
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _in_place_partition(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
count += _in_place_quick_sort(__UpperCAmelCase , __UpperCAmelCase , p - 1 )
count += _in_place_quick_sort(__UpperCAmelCase , p + 1 , __UpperCAmelCase )
return count
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = randint(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = a[end]
SCREAMING_SNAKE_CASE_ = a[pivot]
SCREAMING_SNAKE_CASE_ = temp
SCREAMING_SNAKE_CASE_ = start - 1
for index in range(__UpperCAmelCase , __UpperCAmelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
SCREAMING_SNAKE_CASE_ = new_pivot_index + 1
SCREAMING_SNAKE_CASE_ = a[new_pivot_index]
SCREAMING_SNAKE_CASE_ = a[index]
SCREAMING_SNAKE_CASE_ = temp
SCREAMING_SNAKE_CASE_ = a[new_pivot_index + 1]
SCREAMING_SNAKE_CASE_ = a[end]
SCREAMING_SNAKE_CASE_ = temp
return new_pivot_index + 1, count
lowerCamelCase__ : List[Any] = TemporaryFile()
lowerCamelCase__ : Optional[Any] = 100 # 1000 elements are to be sorted
lowerCamelCase__ , lowerCamelCase__ : List[str] = 0, 1 # mean and standard deviation
lowerCamelCase__ : Tuple = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0) # using the same array
lowerCamelCase__ : Union[str, Any] = np.load(outfile)
lowerCamelCase__ : Union[str, Any] = len(M) - 1
lowerCamelCase__ : Dict = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
'is :'
)
print(z) | 31 |
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
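
# --- Added usage sketch (not part of the original utilities): wiring the
# regression dataset/model into a short training loop. Only the public
# `accelerate.Accelerator` API is assumed; hyperparameters are illustrative.
if __name__ == "__main__":
    from accelerate import Accelerator

    accelerator = Accelerator()
    model = RegressionModel(a=1.0, b=1.0)
    dataloader = DataLoader(RegressionDataset(length=64, seed=42), batch_size=16)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
        accelerator.backward(loss)
        optimizer.step()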
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    text_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    text_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
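
# --- Added standalone sketch (not part of the test suite): the same two-step
# flow the `Extractor` tests exercise, shown outside pytest. Only the
# `infer_extractor_format` / `extract` APIs used above are assumed.
def extract_any_archive(archive_path, output_path):
    extractor_format = Extractor.infer_extractor_format(archive_path)
    if extractor_format is None:
        raise ValueError(f"Don't know how to extract {archive_path}")
    Extractor.extract(archive_path, output_path, extractor_format)
    return output_path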
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor


class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
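
# --- Added quick-start sketch (not part of the test suite): the checkpoint
# exercised above can also be queried through the high-level pipeline API.
# Only the public `transformers.pipeline` entry point is assumed; the image
# path reuses the test fixture.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("image-classification", model="google/mobilenet_v2_1.0_224")
    print(classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")[0])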
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
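
# --- Added worked example (not part of the original file): the first test grid
# is the classic LeetCode 1351 case with exactly 8 negatives, so all three
# counting strategies must agree on it.
if __name__ == "__main__":
    small_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    validate_grid(small_grid)
    assert (
        count_negatives_binary_search(small_grid)
        == count_negatives_brute_force(small_grid)
        == count_negatives_brute_force_with_break(small_grid)
        == 8
    )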
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
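
# --- Added shape sketch (not part of the original module): how the two
# latent-space building blocks above are typically driven. Sizes are
# illustrative only.
if __name__ == "__main__":
    # DiagonalGaussianDistribution stacks mean and logvar on the channel axis
    # (hence the torch.chunk(..., 2, dim=1) in __init__), so a (1, 8, 4, 4)
    # parameter tensor yields (1, 4, 4, 4) samples.
    dist = DiagonalGaussianDistribution(torch.randn(1, 8, 4, 4))
    assert dist.sample().shape == (1, 4, 4, 4)
    print("KL to standard normal:", dist.kl())  # one value per batch element

    # VectorQuantizer snaps a (B, C, H, W) latent onto its nearest codebook
    # entries while keeping gradients via the straight-through trick.
    vq = VectorQuantizer(n_e=16, vq_embed_dim=4, beta=0.25)
    z_q, loss, (_, _, indices) = vq(torch.randn(2, 4, 8, 8))
    assert z_q.shape == (2, 4, 8, 8)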
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
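
# --- Added behavioural check (not part of the original file): all three
# variants must agree on a pangram and a non-pangram.
if __name__ == "__main__":
    for checker in (is_pangram, is_pangram_faster, is_pangram_fastest):
        assert checker("The quick brown fox jumps over the lazy dog") is True
        assert checker("My hovercraft is full of eels") is False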
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
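
# --- Added usage sketch (not part of the original mixin): concrete test cases
# combine the mixin with unittest.TestCase and pin the block under test.
# `DownBlock2D` is a real diffusers block; in real tests `test_output` is
# called with reference slice values, so this only exercises the shapes.
if __name__ == "__main__":
    from diffusers.models.unet_2d_blocks import DownBlock2D

    class DownBlock2DSmokeTest(UNetBlockTesterMixin, unittest.TestCase):
        block_class = DownBlock2D
        block_type = "down"

    print("Expected output shape:", DownBlock2DSmokeTest("test_training").output_shape)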
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
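
# --- Added worked example (not part of the original solution): partition(n)
# returns the *products* of all prime multisets summing to n; since prime
# factorization is unique, its size equals the number of prime summations.
# 7 = 7 = 2 + 5 = 2 + 2 + 3, giving {7, 10, 12}.
if __name__ == "__main__":
    assert partition(7) == {7, 10, 12}
    assert len(partition(71)) > 5000  # 71 is the published answer to Project Euler 77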
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
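
if __name__ == "__main__":
    # --- Added self-check (not part of the original script): "5 6 9 * +" is
    # the postfix form of 5 + (6 * 9), so the evaluator prints its trace table
    # and must return 59.
    assert solve("5 6 9 * +".split()) == 59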
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
lowerCamelCase__ : Any = NewType('DataClass', Any)
lowerCamelCase__ : Tuple = NewType('DataClassType', Any)
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Tuple:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def UpperCAmelCase_ ( __UpperCAmelCase : list ) -> Callable[[str], Any]:
SCREAMING_SNAKE_CASE_ = {str(__UpperCAmelCase ): choice for choice in choices}
return lambda __UpperCAmelCase : str_to_choice.get(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase_ ( *,
__UpperCAmelCase : Union[str, List[str]] = None , __UpperCAmelCase : str = None , __UpperCAmelCase : Any = dataclasses.MISSING , __UpperCAmelCase : Callable[[], Any] = dataclasses.MISSING , __UpperCAmelCase : dict = None , **__UpperCAmelCase : Dict , ) -> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
SCREAMING_SNAKE_CASE_ = {}
if aliases is not None:
SCREAMING_SNAKE_CASE_ = aliases
if help is not None:
SCREAMING_SNAKE_CASE_ = help
return dataclasses.field(metadata=__UpperCAmelCase , default=__UpperCAmelCase , default_factory=__UpperCAmelCase , **__UpperCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 42
def __init__( self : Any , _lowerCAmelCase : Union[DataClassType, Iterable[DataClassType]] , **_lowerCAmelCase : Dict ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
SCREAMING_SNAKE_CASE_ = ArgumentDefaultsHelpFormatter
super().__init__(**_lowerCAmelCase )
if dataclasses.is_dataclass(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = [dataclass_types]
SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_lowerCAmelCase )
@staticmethod
def lowerCAmelCase_ ( _lowerCAmelCase : ArgumentParser , _lowerCAmelCase : dataclasses.Field ):
SCREAMING_SNAKE_CASE_ = F"--{field.name}"
SCREAMING_SNAKE_CASE_ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , _lowerCAmelCase ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
SCREAMING_SNAKE_CASE_ = kwargs.pop('aliases' , [] )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = [aliases]
SCREAMING_SNAKE_CASE_ = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(_lowerCAmelCase , 'UnionType' ) and isinstance(_lowerCAmelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_lowerCAmelCase ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F" Problem encountered in field '{field.name}'." )
if type(_lowerCAmelCase ) not in field.type.__args__:
# filter `str` in Union
SCREAMING_SNAKE_CASE_ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
SCREAMING_SNAKE_CASE_ = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
SCREAMING_SNAKE_CASE_ = (
field.type.__args__[0] if isinstance(_lowerCAmelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
SCREAMING_SNAKE_CASE_ = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
SCREAMING_SNAKE_CASE_ = {}
if origin_type is Literal or (isinstance(field.type , _lowerCAmelCase ) and issubclass(field.type , _lowerCAmelCase )):
if origin_type is Literal:
SCREAMING_SNAKE_CASE_ = field.type.__args__
else:
SCREAMING_SNAKE_CASE_ = [x.value for x in field.type]
SCREAMING_SNAKE_CASE_ = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ = field.default
else:
SCREAMING_SNAKE_CASE_ = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
SCREAMING_SNAKE_CASE_ = copy(_lowerCAmelCase )
# Hack because type=bool in argparse does not behave as we want.
SCREAMING_SNAKE_CASE_ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
SCREAMING_SNAKE_CASE_ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
SCREAMING_SNAKE_CASE_ = default
# This tells argparse we accept 0 or 1 value after --field_name
SCREAMING_SNAKE_CASE_ = '?'
# This is the value that will get picked if we do --field_name (without value)
SCREAMING_SNAKE_CASE_ = True
elif isclass(_lowerCAmelCase ) and issubclass(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = field.type.__args__[0]
SCREAMING_SNAKE_CASE_ = '+'
if field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ = field.default_factory()
elif field.default is dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ = True
else:
SCREAMING_SNAKE_CASE_ = field.type
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ = field.default
elif field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ = field.default_factory()
else:
SCREAMING_SNAKE_CASE_ = True
parser.add_argument(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
SCREAMING_SNAKE_CASE_ = False
parser.add_argument(F"--no_{field.name}" , action='store_false' , dest=field.name , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : DataClassType ):
if hasattr(_lowerCAmelCase , '_argument_group_name' ):
SCREAMING_SNAKE_CASE_ = self.add_argument_group(dtype._argument_group_name )
else:
SCREAMING_SNAKE_CASE_ = self
try:
SCREAMING_SNAKE_CASE_ = get_type_hints(_lowerCAmelCase )
except NameError:
raise RuntimeError(
F"Type resolution failed for {dtype}. Try declaring the class in global scope or "
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = '.'.join(map(_lowerCAmelCase , sys.version_info[:3] ) )
raise RuntimeError(
F"Type resolution failed for {dtype} on Python {python_version}. Try removing "
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(_lowerCAmelCase ):
if not field.init:
continue
SCREAMING_SNAKE_CASE_ = type_hints[field.name]
self._parse_dataclass_field(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : int=None , _lowerCAmelCase : str=False , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Any=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
SCREAMING_SNAKE_CASE_ = []
if args_filename:
args_files.append(Path(_lowerCAmelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
SCREAMING_SNAKE_CASE_ = ArgumentParser()
args_file_parser.add_argument(_lowerCAmelCase , type=_lowerCAmelCase , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = args_file_parser.parse_known_args(args=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = vars(_lowerCAmelCase ).get(args_file_flag.lstrip('-' ) , _lowerCAmelCase )
if cmd_args_file_paths:
args_files.extend([Path(_lowerCAmelCase ) for p in cmd_args_file_paths] )
SCREAMING_SNAKE_CASE_ = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
SCREAMING_SNAKE_CASE_ = file_args + args if args is not None else file_args + sys.argv[1:]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.parse_known_args(args=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = []
for dtype in self.dataclass_types:
SCREAMING_SNAKE_CASE_ = {f.name for f in dataclasses.fields(_lowerCAmelCase ) if f.init}
SCREAMING_SNAKE_CASE_ = {k: v for k, v in vars(_lowerCAmelCase ).items() if k in keys}
for k in keys:
delattr(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = dtype(**_lowerCAmelCase )
outputs.append(_lowerCAmelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_lowerCAmelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
return (*outputs,)
    def parse_dict( self , args: Dict[str, Any] , allow_extra_keys: bool = False ):
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F"Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}" )
        return tuple(outputs )
    def parse_json_file( self , json_file: str , allow_extra_keys: bool = False ):
        with open(Path(json_file ) , encoding='utf-8' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file: str , allow_extra_keys: bool = False ):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
def sylvester( number : int ) -> int:
    assert isinstance(number , int ), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
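# Sanity check: the first terms of Sylvester's sequence are 2, 3, 7, 43, 1807, ...
# e.g. [sylvester(i) for i in range(1, 5)] == [2, 3, 7, 43]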
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''') | 31 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ : Dict = logging.get_logger(__name__)
def squared_euclidean_distance( a : np.ndarray , b : np.ndarray ) -> np.ndarray:
    b = b.T
    a2 = np.sum(np.square(a ) , axis=1 )
    b2 = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize( x : np.ndarray , clusters : np.ndarray ) -> np.ndarray:
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
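# color_quantize maps every RGB pixel to the index of its nearest cluster centre,
# so an input of shape (height, width, 3) yields height * width cluster indices.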
class ImageGPTImageProcessor( BaseImageProcessor ):
'''simple docstring'''
lowercase_ = ["pixel_values"]
    def __init__( self , clusters: Optional[Union[List[List[int]], np.ndarray]] = None , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_normalize: bool = True , do_color_quantize: bool = True , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BILINEAR , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size dictionary must contain both height and width keys. Got {size.keys()}" )
        return resize(
            image , size=(size['height'], size['width']) , resample=resample , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , data_format: Optional[Union[str, ChannelDimension]] = None , ):
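        # Map pixel values from [0, 255] into [-1, 1]: scaling by 1/127.5 gives [0, 2], then subtract 1.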
        image = rescale(image=image , scale=1 / 127.5 , data_format=data_format )
        image = image - 1
        return image
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample: PILImageResampling = None , do_normalize: bool = None , do_color_quantize: Optional[bool] = None , clusters: Optional[Union[List[List[int]], np.ndarray]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_color_quantize and clusters is None:
            raise ValueError('Clusters must be specified if do_color_quantize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images , clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'input_ids': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
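# mT5 reuses the T5 tokenizers, so the two aliases above are exported eagerly through
# `extra_objects` below rather than through the lazy import structure.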
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
) | 31 | 1 |
import math
def main() -> None:
    message = input('Enter message: ' )
    key = int(input(f"Enter key [2-{len(message ) - 1}]: " ) )
    mode = input('Encryption/Decryption [e/d]: ' )
    if mode.lower().startswith('e' ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith('d' ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}" )
def encrypt_message( key : int , message : str ) -> str:
    cipher_text = [''] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message( key : int , message : str ) -> str:
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 31 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests( TestCasePlus ):
'''simple docstring'''
@require_torch
    def test_offline_mode( self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='fill-mask' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
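    # After the first successful run above, the tiny model files live in the local HF cache,
    # which is what allows the TRANSFORMERS_OFFLINE=1 runs in the tests below to pass.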
@require_torch
    def test_offline_mode_no_internet( self ):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='fill-mask' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint( self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception( self ):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
    def test_offline_model_dynamic_model( self ):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
    args , unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
        cluster = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
        cluster = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit('/', 1)[0]
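    # e.g. example_dir == 'pytorch/text-generation' for the default example argument above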
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True) | 31 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig( XLMRobertaConfig ):
'''simple docstring'''
lowercase_ = "M-CLIP"
    def __init__( self , transformerDimSize: int = 1_024 , imageDimSize: int = 768 , **kwargs ):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP( PreTrainedModel ):
'''simple docstring'''
lowercase_ = MCLIPConfig
    def __init__( self , config , *args , **kwargs ):
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def forward( self , input_ids , attention_mask ):
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        embs2 = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs2 ), embs
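    # forward() mean-pools the token embeddings under the attention mask, then returns the
    # linear projection of the pooled vector together with the raw per-token embeddings.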
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    '''simple docstring'''
    def __init__( self , pos_x : int , pos_y : int , goal_x : int , goal_y : int , parent : Node | None ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    '''simple docstring'''
    def __init__( self , start : tuple[int, int] , goal : tuple[int, int] ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False
    def search( self ):
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self , parent : Node ):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors
    def retrace_path( self , node : Node | None ):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    '''simple docstring'''
    def __init__( self , start , goal ):
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False
    def search( self ):
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path( self , fwd_node : Node , bwd_node : Node ):
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
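# Retargeting each frontier at the other's newest node lets the two searches meet in the
# middle, expanding roughly O(b^(d/2)) nodes per side instead of O(b^d) for plain BFS.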
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print('Unidirectional BFS computation time : ', bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time) | 31 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImgaImgPipelineFastTests( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
    def dummy_vae( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder( self ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(_lowerCAmelCase )
@property
    def dummy_extractor( self ):
def extract(*_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
            class Out:
'''simple docstring'''
def __init__( self : str ):
                    self.pixel_values = torch.ones([0] )
                def to( self , device ):
                    self.pixel_values.to(device )
                    return self
return Out()
return extract
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
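        # floats_tensor samples values in [0, 1); the shift below moves them into [0.5, 1.0),
        # which keeps the dummy input inside a valid image range for img2img.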
        init_image = self.dummy_image.to(torch_device )
        init_image = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
# put models in fp16
SCREAMING_SNAKE_CASE_ = unet.half()
SCREAMING_SNAKE_CASE_ = vae.half()
SCREAMING_SNAKE_CASE_ = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE_ = init_image.resize((760, 504) )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
SCREAMING_SNAKE_CASE_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE_ = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2 | 31 | 1 |
def simplify( current_set : list[list] ) -> list[list]:
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set ):
        magnitude = row[0]
        for column_index, column in enumerate(row ):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row )
            continue
        for column_index in range(len(row ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(temp_row )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        resultant = simplify(next_iteration )
        for i in range(len(resultant ) ):
            resultant[i].insert(0 , current_first_column[i] )
        resultant.insert(0 , current_first_row )
        final_set = resultant
    return final_set
def solve_simultaneous( equations : list[list] ) -> list:
    if len(equations ) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1' )
    _length = len(equations ) + 1
    if any(len(item ) != _length for item in equations ):
        raise IndexError('solve_simultaneous() requires n lists of length n+1' )
    for row in equations:
        if any(not isinstance(column , (int, float) ) for column in row ):
            raise ValueError('solve_simultaneous() requires lists of integers' )
    if len(equations ) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set ):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data ):
            if 0 not in row:
                full_row = data_set.pop(row_index )
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation' )
        data_set.insert(0 , full_row )
    useable_form = data_set.copy()
    simplified = simplify(useable_form )
    simplified = simplified[::-1]
    solutions = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0 )
                continue
            solutions.append(current_solution / row[-2] )
            continue
        temp_row = row.copy()[: len(row ) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0 )
        if len(temp_row ) == 0:
            solutions.append(0 )
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row ):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution )
    final = []
    for item in solutions:
        final.append(float(round(item , 5 ) ) )
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]])) | 31 |
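    # Expected output (values rounded to 5 decimals): [-1.0, 0.0, 1.0, 2.0, 3.0] and [0.5]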
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig( PretrainedConfig ):
'''simple docstring'''
lowercase_ = "longformer"
    def __init__( self , attention_window: Union[List[int], int] = 512 , sep_token_id: int = 2 , pad_token_id: int = 1 , bos_token_id: int = 0 , eos_token_id: int = 2 , vocab_size: int = 30_522 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3_072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 512 , type_vocab_size: int = 2 , initializer_range: float = 0.02 , layer_norm_eps: float = 1E-12 , onnx_export: bool = False , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig( OnnxConfig ):
'''simple docstring'''
    def __init__( self , config: "PretrainedConfig" , task: str = "default" , patching_specs: "List[PatchingSpec]" = None ):
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
    def inputs( self ):
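        # Longformer adds a global_attention_mask input on top of the usual input_ids / attention_mask.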
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
    def outputs( self ):
        outputs = super().outputs
        if self.task == "default":
            outputs['pooler_output'] = {0: 'batch'}
return outputs
@property
    def atol_for_validation( self ):
return 1E-4
@property
    def default_onnx_opset( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs( self , tokenizer: "PreTrainedTokenizerBase" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs['global_attention_mask'] = torch.zeros_like(inputs['input_ids'] )
        # make every second token global
        inputs['global_attention_mask'][:, ::2] = 1
return inputs | 31 | 1 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
'''simple docstring'''
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
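    # Tiling to (batch_size, num_choices, seq_len) above lets one shared encoder score every choice.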
    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFDistilBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
lowercase_ = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase_ = False
lowercase_ = False
    def setUp( self ):
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_distilbert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDistilBertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_output_embeds( self ):
        model = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
[
[
[0.1926_1885, -0.1373_2955, 0.411_9799],
[0.2215_0156, -0.0742_2661, 0.3903_7204],
[0.2275_6018, -0.089_6414, 0.370_1467],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
class MobileViTFeatureExtractor( MobileViTImageProcessor ):
'''simple docstring'''
def __init__( self : str , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : int ):
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.' , FutureWarning , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) | 31 | 1 |
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval( s : str ) -> int:
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution( n : str = N ) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
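# Known result: for the default 1000-digit input of Project Euler problem 8,
# solution() == 23514624000 (the greatest product of 13 adjacent digits).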
if __name__ == "__main__":
print(f'''{solution() = }''') | 31 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class Swinv2Config( PretrainedConfig ):
'''simple docstring'''
lowercase_ = "swinv2"
lowercase_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Dict , _lowerCAmelCase : Optional[Any]=224 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Tuple=96 , _lowerCAmelCase : Dict=[2, 2, 6, 2] , _lowerCAmelCase : Optional[Any]=[3, 6, 12, 24] , _lowerCAmelCase : str=7 , _lowerCAmelCase : List[Any]=4.0 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : str=False , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : List[Any]=1E-5 , _lowerCAmelCase : str=32 , **_lowerCAmelCase : List[Any] , ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = embed_dim
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = num_heads
SCREAMING_SNAKE_CASE_ = window_size
SCREAMING_SNAKE_CASE_ = mlp_ratio
SCREAMING_SNAKE_CASE_ = qkv_bias
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = use_absolute_embeddings
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE_ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
SCREAMING_SNAKE_CASE_ = (0, 0, 0, 0) | 31 | 1 |
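# Sketch of the channel-dimension arithmetic from the config above: Swin-style
# encoders double the embedding dimension after every stage, so the hidden size
# exposed to downstream heads is embed_dim * 2 ** (num_stages - 1). With the
# defaults above (embed_dim=96, depths=[2, 2, 6, 2]) this gives 96 * 8 == 768.
def final_hidden_size(embed_dim, depths):
    return int(embed_dim * 2 ** (len(depths) - 1))

assert final_hidden_size(96, [2, 2, 6, 2]) == 768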
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "roberta"
def __init__( self : Dict , _lowerCAmelCase : Union[str, Any]=50_265 , _lowerCAmelCase : List[Any]=768 , _lowerCAmelCase : List[str]=12 , _lowerCAmelCase : List[str]=12 , _lowerCAmelCase : str=3_072 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Tuple=512 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : Dict=1E-12 , _lowerCAmelCase : Optional[Any]=1 , _lowerCAmelCase : Union[str, Any]=0 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Optional[Any]="absolute" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : List[str]=None , **_lowerCAmelCase : Tuple , ):
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = classifier_dropout
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 31 |
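# Sketch of the dynamic-axes mapping built by the OnnxConfig above: every model
# input is mapped to the axis positions whose size can vary at export time
# (batch, sequence, and, for multiple choice, the choice axis). This helper is
# illustrative, not a transformers API.
from collections import OrderedDict

def onnx_input_axes(task):
    if task == 'multiple-choice':
        axes = {0: 'batch', 1: 'choice', 2: 'sequence'}
    else:
        axes = {0: 'batch', 1: 'sequence'}
    return OrderedDict([('input_ids', axes), ('attention_mask', axes)])

assert list(onnx_input_axes('default')) == ['input_ids', 'attention_mask']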
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
lowerCamelCase__ : Dict = random.Random()
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Tuple=1.0 , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Dict=None ) -> Tuple:
if rng is None:
SCREAMING_SNAKE_CASE_ = global_rng
SCREAMING_SNAKE_CASE_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=7 , _lowerCAmelCase : Union[str, Any]=400 , _lowerCAmelCase : Tuple=2_000 , _lowerCAmelCase : str=1 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Optional[Any]=16_000 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=80 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : List[str]=64 , _lowerCAmelCase : List[Any]="hann_window" , _lowerCAmelCase : Any=80 , _lowerCAmelCase : List[Any]=7_600 , _lowerCAmelCase : List[Any]=1E-10 , _lowerCAmelCase : Optional[Any]=True , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = min_seq_length
SCREAMING_SNAKE_CASE_ = max_seq_length
SCREAMING_SNAKE_CASE_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE_ = feature_size
SCREAMING_SNAKE_CASE_ = padding_value
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = num_mel_bins
SCREAMING_SNAKE_CASE_ = hop_length
SCREAMING_SNAKE_CASE_ = win_length
SCREAMING_SNAKE_CASE_ = win_function
SCREAMING_SNAKE_CASE_ = fmin
SCREAMING_SNAKE_CASE_ = fmax
SCREAMING_SNAKE_CASE_ = mel_floor
SCREAMING_SNAKE_CASE_ = return_attention_mask
def lowerCAmelCase_ ( self : Union[str, Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : str=False ):
def _flatten(_lowerCAmelCase : Dict ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Optional[int]=False ):
if equal_length:
SCREAMING_SNAKE_CASE_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = SpeechTaFeatureExtractor
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractionTester(self )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : int ):
self.assertTrue(np.all(np.mean(_lowerCAmelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase_ ( self : List[Any] ):
    # Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE_ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ = [None, 1_600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = range(800 , 1_400 , 200 )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ = [None, 1_600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_000 , padding='max_length' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
    # make sure that if max_length < longest -> then truncate/pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=2_000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowerCAmelCase_ ( self : Tuple ):
    # Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE_ = feature_extractor(audio_target=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE_ = np.asarray(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = min(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_lowerCAmelCase , padding='max_length' , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Tuple ):
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE_ = ds.sort('id' ).select(range(_lowerCAmelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowerCAmelCase_ ( self : Any ):
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , _lowerCAmelCase , atol=1E-6 ) )
def lowerCAmelCase_ ( self : Optional[int] ):
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = feature_extractor(audio_target=_lowerCAmelCase , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _lowerCAmelCase , atol=1E-4 ) ) | 31 | 1 |
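# The tests above repeatedly assert that padded inputs are roughly zero-mean and
# unit-variance. A numpy sketch of that normalization plus the 1e-3 check the
# tests use (names and the epsilon are illustrative):
import numpy as np

def zero_mean_unit_var(x):
    return (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + 1E-7)

samples = np.random.rand(400, 80)
normed = zero_mean_unit_var(samples)
assert np.all(np.abs(normed.mean(axis=0)) < 1E-3)
assert np.all(np.abs(normed.var(axis=0) - 1) < 1E-3)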
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ : Dict = 'RegNetConfig'
# Base docstring
lowerCamelCase__ : Union[str, Any] = 'facebook/regnet-y-040'
lowerCamelCase__ : Dict = [1, 1_088, 7, 7]
# Image classification docstring
lowerCamelCase__ : List[Any] = 'facebook/regnet-y-040'
lowerCamelCase__ : Any = 'tabby, tabby cat'
lowerCamelCase__ : List[Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : Optional[str] = "relu" , **_lowerCAmelCase : Tuple , ):
super().__init__(**_lowerCAmelCase )
    # The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
SCREAMING_SNAKE_CASE_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.ConvaD(
filters=_lowerCAmelCase , kernel_size=_lowerCAmelCase , strides=_lowerCAmelCase , padding='VALID' , groups=_lowerCAmelCase , use_bias=_lowerCAmelCase , name='convolution' , )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
SCREAMING_SNAKE_CASE_ = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.convolution(self.padding(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = self.normalization(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.activation(_lowerCAmelCase )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[int] , _lowerCAmelCase : RegNetConfig , **_lowerCAmelCase : List[Any] ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = config.num_channels
SCREAMING_SNAKE_CASE_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = shape_list(_lowerCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
SCREAMING_SNAKE_CASE_ = tf.transpose(_lowerCAmelCase , perm=(0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ = self.embedder(_lowerCAmelCase )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , **_lowerCAmelCase : Dict ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.ConvaD(
filters=_lowerCAmelCase , kernel_size=1 , strides=_lowerCAmelCase , use_bias=_lowerCAmelCase , name='convolution' )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : bool = False ):
return self.normalization(self.convolution(_lowerCAmelCase ) , training=_lowerCAmelCase )
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : int , **_lowerCAmelCase : Any ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCAmelCase , name='pooler' )
SCREAMING_SNAKE_CASE_ = [
tf.keras.layers.ConvaD(filters=_lowerCAmelCase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=_lowerCAmelCase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
SCREAMING_SNAKE_CASE_ = self.pooler(_lowerCAmelCase )
for layer_module in self.attention:
SCREAMING_SNAKE_CASE_ = layer_module(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = hidden_state * pooled
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : str , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : str ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ = (
TFRegNetShortCut(_lowerCAmelCase , stride=_lowerCAmelCase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
SCREAMING_SNAKE_CASE_ = [
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
_lowerCAmelCase , stride=_lowerCAmelCase , groups=_lowerCAmelCase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase , name='layer.2' ),
]
SCREAMING_SNAKE_CASE_ = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ = layer_module(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.shortcut(_lowerCAmelCase )
hidden_state += residual
SCREAMING_SNAKE_CASE_ = self.activation(_lowerCAmelCase )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[Any] , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 1 , **_lowerCAmelCase : Dict ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ = (
TFRegNetShortCut(_lowerCAmelCase , stride=_lowerCAmelCase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
SCREAMING_SNAKE_CASE_ = [
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
_lowerCAmelCase , stride=_lowerCAmelCase , groups=_lowerCAmelCase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(_lowerCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(_lowerCAmelCase , kernel_size=1 , activation=_lowerCAmelCase , name='layer.3' ),
]
SCREAMING_SNAKE_CASE_ = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ = layer_module(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.shortcut(_lowerCAmelCase )
hidden_state += residual
SCREAMING_SNAKE_CASE_ = self.activation(_lowerCAmelCase )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : str , _lowerCAmelCase : RegNetConfig , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , **_lowerCAmelCase : Optional[Any] ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
SCREAMING_SNAKE_CASE_ = [
# downsampling is done in the first layer with stride of 2
layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , stride=_lowerCAmelCase , name='layers.0' ),
*[layer(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , name=F"layers.{i+1}" ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Optional[int] ):
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ = layer_module(_lowerCAmelCase )
return hidden_state
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : RegNetConfig , **_lowerCAmelCase : str ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
SCREAMING_SNAKE_CASE_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_lowerCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , depth=_lowerCAmelCase , name=F"stages.{i+1}" ) )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True ):
SCREAMING_SNAKE_CASE_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE_ = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE_ = stage_module(_lowerCAmelCase )
if output_hidden_states:
SCREAMING_SNAKE_CASE_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_lowerCAmelCase , hidden_states=_lowerCAmelCase )
@keras_serializable
class lowerCamelCase_ ( tf.keras.layers.Layer ):
'''simple docstring'''
lowercase_ = RegNetConfig
def __init__( self : int , _lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[int] ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = config
SCREAMING_SNAKE_CASE_ = TFRegNetEmbeddings(_lowerCAmelCase , name='embedder' )
SCREAMING_SNAKE_CASE_ = TFRegNetEncoder(_lowerCAmelCase , name='encoder' )
SCREAMING_SNAKE_CASE_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_lowerCAmelCase , name='pooler' )
@unpack_inputs
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , ):
SCREAMING_SNAKE_CASE_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.embedder(_lowerCAmelCase , training=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.encoder(
_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = encoder_outputs[0]
SCREAMING_SNAKE_CASE_ = self.pooler(_lowerCAmelCase )
    # Change to NCHW output format to keep uniformity across the modules
SCREAMING_SNAKE_CASE_ = tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) )
SCREAMING_SNAKE_CASE_ = tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
SCREAMING_SNAKE_CASE_ = tuple([tf.transpose(_lowerCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCAmelCase , pooler_output=_lowerCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = RegNetConfig
lowercase_ = "regnet"
lowercase_ = "pixel_values"
@property
def lowerCAmelCase_ ( self : List[str] ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCamelCase__ : int = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCamelCase__ : Any = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , _SCREAMING_SNAKE_CASE , )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict , _lowerCAmelCase : RegNetConfig , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Tuple ):
super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = TFRegNetMainLayer(_lowerCAmelCase , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(_lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : tf.Tensor , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : str=False , ):
SCREAMING_SNAKE_CASE_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.regnet(
pixel_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , _SCREAMING_SNAKE_CASE , )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : RegNetConfig , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Optional[int] ):
super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = config.num_labels
SCREAMING_SNAKE_CASE_ = TFRegNetMainLayer(_lowerCAmelCase , name='regnet' )
# classification head
SCREAMING_SNAKE_CASE_ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : tf.Tensor = None , _lowerCAmelCase : tf.Tensor = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict=False , ):
SCREAMING_SNAKE_CASE_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.regnet(
_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , return_dict=_lowerCAmelCase , training=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE_ = self.classifier[0](_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.classifier[1](_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = None if labels is None else self.hf_compute_loss(labels=_lowerCAmelCase , logits=_lowerCAmelCase )
if not return_dict:
SCREAMING_SNAKE_CASE_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_lowerCAmelCase , logits=_lowerCAmelCase , hidden_states=outputs.hidden_states ) | 31 |
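# The TF model above transposes between NCHW (the PyTorch-style layout its
# input signature accepts) and NHWC (what tf.keras.layers.Conv2D expects on
# CPU). A numpy sketch of the two permutations used at the embedder input and
# on the outputs:
import numpy as np

x_nchw = np.zeros((2, 3, 224, 224))           # (batch, channels, height, width)
x_nhwc = np.transpose(x_nchw, (0, 2, 3, 1))   # -> (batch, height, width, channels)
assert x_nhwc.shape == (2, 224, 224, 3)
x_back = np.transpose(x_nhwc, (0, 3, 1, 2))   # -> back to (batch, channels, height, width)
assert x_back.shape == (2, 3, 224, 224)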
from __future__ import annotations
from typing import TypedDict
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 42
lowercase_ = 42
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> list[str]:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('The parameter s type must be str.' )
return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )]
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> BWTTransformDict:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('The parameter s type must be str.' )
if not s:
raise ValueError('The parameter s must not be empty.' )
SCREAMING_SNAKE_CASE_ = all_rotations(__UpperCAmelCase )
  rotations.sort() # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
SCREAMING_SNAKE_CASE_ = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__UpperCAmelCase ),
}
return response
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : int ) -> str:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('The parameter bwt_string type must be str.' )
if not bwt_string:
raise ValueError('The parameter bwt_string must not be empty.' )
try:
SCREAMING_SNAKE_CASE_ = int(__UpperCAmelCase )
except ValueError:
    raise TypeError(
      'The parameter idx_original_string must be an int or'
      ' castable to int.' )
if idx_original_string < 0:
raise ValueError('The parameter idx_original_string must not be lower than 0.' )
if idx_original_string >= len(__UpperCAmelCase ):
raise ValueError(
'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
SCREAMING_SNAKE_CASE_ = [''] * len(__UpperCAmelCase )
for _ in range(len(__UpperCAmelCase ) ):
for i in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = 'Provide a string that I will generate its BWT transform: '
lowerCamelCase__ : List[str] = input(entry_msg).strip()
lowerCamelCase__ : int = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
lowerCamelCase__ : Dict = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
) | 31 | 1 |
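# Worked example for the transform above, written with readable names (a sketch,
# not the snippet's functions). For s = 'banana' the sorted rotations are
# abanan, anaban, ananab, banana, nabana, nanaba, so the BWT string (the last
# column) is 'nnbaaa' and the original string sits at index 3.
def bwt(s):
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return ''.join(rotation[-1] for rotation in rotations), rotations.index(s)

assert bwt('banana') == ('nnbaaa', 3)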
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any]=14 , _lowerCAmelCase : int=7 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[Any]=99 , _lowerCAmelCase : Dict=32 , _lowerCAmelCase : Optional[int]=5 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Union[str, Any]=37 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Tuple=512 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Tuple=0.02 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : List[str]=None , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_input_mask
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = use_mc_token_ids
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = num_choices
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = self.vocab_size - 1
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_mc_token_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ = self.get_config()
SCREAMING_SNAKE_CASE_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase_ ( self : Optional[int] ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , *_lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = CTRLModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , head_mask=_lowerCAmelCase )
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , *_lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = CTRLLMHeadModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
    (
      SCREAMING_SNAKE_CASE_,
      SCREAMING_SNAKE_CASE_,
      SCREAMING_SNAKE_CASE_,
      SCREAMING_SNAKE_CASE_,
      SCREAMING_SNAKE_CASE_,
      SCREAMING_SNAKE_CASE_,
      SCREAMING_SNAKE_CASE_,
      SCREAMING_SNAKE_CASE_,
      SCREAMING_SNAKE_CASE_,
    ) = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , *_lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = CTRLForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowercase_ = (CTRLLMHeadModel,) if is_torch_available() else ()
lowercase_ = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ = True
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = CTRLModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , n_embd=37 )
def lowerCAmelCase_ ( self : int ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : int ):
pass
@slow
def lowerCAmelCase_ ( self : str ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = CTRLModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase_ ( self : int ):
pass
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[11_859, 0, 1_611, 8]] , dtype=torch.long , device=_lowerCAmelCase ) # Legal the president is
SCREAMING_SNAKE_CASE_ = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist() , _lowerCAmelCase ) | 31 |
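# The model tester above builds its fixtures with helpers such as ids_tensor
# (random token ids) and random_attention_mask. A torch sketch of what those
# helpers produce, shaped to the tester defaults (batch_size=14, seq_length=7,
# vocab_size=99); illustrative only, not the transformers testing utilities:
import torch

input_ids = torch.randint(0, 99, (14, 7), dtype=torch.long)
attention_mask = (torch.rand(14, 7) > 0.5).long()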
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str ):
SCREAMING_SNAKE_CASE_ = {}
def lowerCAmelCase_ ( self : List[str] ):
print(self.vertex )
for i in self.vertex:
print(_lowerCAmelCase , ' -> ' , ' -> '.join([str(_lowerCAmelCase ) for j in self.vertex[i]] ) )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
    # check if the vertex is already present
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_lowerCAmelCase )
else:
# else make a new vertex
SCREAMING_SNAKE_CASE_ = [to_vertex]
def lowerCAmelCase_ ( self : Optional[Any] ):
# visited array for storing already visited nodes
SCREAMING_SNAKE_CASE_ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : list ):
# mark start vertex as visited
SCREAMING_SNAKE_CASE_ = True
print(_lowerCAmelCase , end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3 | 31 | 1 |
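# The traversal above is recursive; an equivalent iterative sketch with an
# explicit stack, for the same adjacency-dict representation. Neighbours are
# pushed in reverse so they pop in insertion order, matching the output above.
def dfs_iterative(graph, start):
    visited, stack, order = set(), [start], []
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        stack.extend(reversed(graph.get(vertex, [])))
    return order

assert dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]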
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "yolos"
def __init__( self : int , _lowerCAmelCase : Any=768 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Any=12 , _lowerCAmelCase : List[str]=3_072 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : Optional[int]=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Union[str, Any]=0.02 , _lowerCAmelCase : Optional[int]=1E-12 , _lowerCAmelCase : List[str]=[512, 864] , _lowerCAmelCase : Optional[Any]=16 , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : str=True , _lowerCAmelCase : int=100 , _lowerCAmelCase : int=True , _lowerCAmelCase : Any=False , _lowerCAmelCase : List[Any]=1 , _lowerCAmelCase : Union[str, Any]=5 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : List[Any]=5 , _lowerCAmelCase : int=2 , _lowerCAmelCase : int=0.1 , **_lowerCAmelCase : Dict , ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = qkv_bias
SCREAMING_SNAKE_CASE_ = num_detection_tokens
SCREAMING_SNAKE_CASE_ = use_mid_position_embeddings
SCREAMING_SNAKE_CASE_ = auxiliary_loss
# Hungarian matcher
SCREAMING_SNAKE_CASE_ = class_cost
SCREAMING_SNAKE_CASE_ = bbox_cost
SCREAMING_SNAKE_CASE_ = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE_ = bbox_loss_coefficient
SCREAMING_SNAKE_CASE_ = giou_loss_coefficient
SCREAMING_SNAKE_CASE_ = eos_coefficient
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = version.parse("1.11" )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCAmelCase_ ( self : Tuple ):
return 1E-4
@property
def lowerCAmelCase_ ( self : List[str] ):
return 12 | 31 |
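# Sketch of the sequence-length arithmetic implied by the YOLOS config above:
# the encoder sees one CLS token, one token per image patch, and
# num_detection_tokens learned detection queries appended at the end. With the
# defaults (image_size=[512, 864], patch_size=16, num_detection_tokens=100):
def yolos_seq_len(image_size, patch_size, num_detection_tokens):
    height, width = image_size
    return 1 + (height // patch_size) * (width // patch_size) + num_detection_tokens

assert yolos_seq_len([512, 864], 16, 100) == 1 + 32 * 54 + 100  # == 1829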
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "funnel"
lowercase_ = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : int , _lowerCAmelCase : Optional[int]=30_522 , _lowerCAmelCase : List[str]=[4, 4, 4] , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : int=768 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[Any]=3_072 , _lowerCAmelCase : List[str]="gelu_new" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : str=1E-9 , _lowerCAmelCase : Any="mean" , _lowerCAmelCase : Union[str, Any]="relative_shift" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=True , **_lowerCAmelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = block_sizes
SCREAMING_SNAKE_CASE_ = [1] * len(_lowerCAmelCase ) if block_repeats is None else block_repeats
assert len(_lowerCAmelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
SCREAMING_SNAKE_CASE_ = num_decoder_layers
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = n_head
SCREAMING_SNAKE_CASE_ = d_head
SCREAMING_SNAKE_CASE_ = d_inner
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = initializer_std
SCREAMING_SNAKE_CASE_ = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
SCREAMING_SNAKE_CASE_ = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
SCREAMING_SNAKE_CASE_ = attention_type
SCREAMING_SNAKE_CASE_ = separate_cls
SCREAMING_SNAKE_CASE_ = truncate_seq
SCREAMING_SNAKE_CASE_ = pool_q_only
super().__init__(**_lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[Any] ):
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return len(self.block_sizes )
@num_blocks.setter
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any] ):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
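# --- Illustrative sketch (editor addition) ---
# In this config `num_hidden_layers` is derived rather than stored: it is the
# sum of `block_sizes`, and the number of blocks is `len(block_sizes)`; that is
# why both setters above raise. A standalone illustration of the arithmetic:
def _funnel_layer_counts(block_sizes, block_repeats=None):
    # `block_repeats` controls weight sharing within a block, not the layer
    # count, mirroring the assertion in the constructor above.
    block_repeats = block_repeats or [1] * len(block_sizes)
    assert len(block_sizes) == len(block_repeats)
    return sum(block_sizes), len(block_sizes)
# _funnel_layer_counts([4, 4, 4]) -> (12, 3): 12 hidden layers across 3 blocks,
# matching the default `block_sizes` in the constructor signature above.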
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[int] , _lowerCAmelCase : str = "▁" , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[str, AddedToken] = "<unk>" , _lowerCAmelCase : Union[str, AddedToken] = "</s>" , _lowerCAmelCase : Union[str, AddedToken] = "<pad>" , ):
SCREAMING_SNAKE_CASE_ = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
SCREAMING_SNAKE_CASE_ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
SCREAMING_SNAKE_CASE_ = token_dict['token']
SCREAMING_SNAKE_CASE_ = Tokenizer(Unigram() )
SCREAMING_SNAKE_CASE_ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
SCREAMING_SNAKE_CASE_ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase ),
pre_tokenizers.Digits(individual_digits=_lowerCAmelCase ),
pre_tokenizers.Punctuation(),
] )
SCREAMING_SNAKE_CASE_ = decoders.Metaspace(replacement=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = TemplateProcessing(
single=F"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
SCREAMING_SNAKE_CASE_ = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Union[str, List[str]] , _lowerCAmelCase : int = 8_000 , _lowerCAmelCase : bool = True , ):
SCREAMING_SNAKE_CASE_ = trainers.UnigramTrainer(
vocab_size=_lowerCAmelCase , special_tokens=self.special_tokens_list , show_progress=_lowerCAmelCase , )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = [files]
self._tokenizer.train(_lowerCAmelCase , trainer=_lowerCAmelCase )
self.add_unk_id()
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , _lowerCAmelCase : int = 8_000 , _lowerCAmelCase : bool = True , ):
SCREAMING_SNAKE_CASE_ = trainers.UnigramTrainer(
vocab_size=_lowerCAmelCase , special_tokens=self.special_tokens_list , show_progress=_lowerCAmelCase , )
self._tokenizer.train_from_iterator(_lowerCAmelCase , trainer=_lowerCAmelCase )
self.add_unk_id()
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = json.loads(self._tokenizer.to_str() )
SCREAMING_SNAKE_CASE_ = self.special_tokens['unk']['id']
        SCREAMING_SNAKE_CASE_ = Tokenizer.from_str(json.dumps(_lowerCAmelCase ) )
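# --- Illustrative sketch (editor addition) ---
# Minimal usage of the Unigram tokenizer wrapper above, assuming the obfuscated
# class name corresponds to `SentencePieceUnigramTokenizer`; the corpus path is
# a hypothetical placeholder:
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train(files=["corpus.txt"], vocab_size=8_000)  # trains, then re-adds <unk>
#     tokenizer.save("unigram-tokenizer.json")
#     print(tokenizer.encode("It costs 42 dollars").tokens)
#
# Note the pre-tokenizers configured above split digits individually, so "42"
# is tokenized as separate "4" and "2" pieces.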
from __future__ import annotations
from collections.abc import Iterator
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = value
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , _lowerCAmelCase : Node ):
SCREAMING_SNAKE_CASE_ = tree
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Node | None ):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Dict ):
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
    doctest.testmod()
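# --- Illustrative sketch (editor addition) ---
# The iterator above yields exactly one value: the sum of every node's value,
# computed recursively. With readable names for the two obfuscated classes
# (`Node` for the tree node, `BinaryTreeNodeSum` for the wrapper, as in the
# upstream algorithms collection), a demo would look like:
#
#     root = Node(10)
#     root.left = Node(5)
#     root.right = Node(-3)
#     assert next(iter(BinaryTreeNodeSum(root))) == 12  # 10 + 5 - 3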
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
lowerCamelCase__ : int = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ : List[Any] = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def UpperCAmelCase_ ( ) -> int:
SCREAMING_SNAKE_CASE_ = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
SCREAMING_SNAKE_CASE_ = bs[:]
SCREAMING_SNAKE_CASE_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCAmelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_ = [chr(__UpperCAmelCase ) for n in cs]
return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = set()
SCREAMING_SNAKE_CASE_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ = char
return pairs
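# --- Illustrative sketch (editor addition) ---
# The two helpers above are the GPT-2-style byte-level BPE plumbing: the first
# builds a reversible 256-entry byte -> printable-unicode map (non-printable
# bytes are remapped above U+0100), and the second collects the adjacent symbol
# pairs that BPE merges rank. With their upstream names (`bytes_to_unicode`,
# `get_pairs`, both obfuscated here):
#
#     byte_encoder = bytes_to_unicode()
#     assert len(byte_encoder) == 256 and byte_encoder[ord("A")] == "A"
#     assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}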
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict="replace" , _lowerCAmelCase : List[str]="<s>" , _lowerCAmelCase : Union[str, Any]="</s>" , _lowerCAmelCase : Dict="</s>" , _lowerCAmelCase : Tuple="<s>" , _lowerCAmelCase : Any="<unk>" , _lowerCAmelCase : Optional[Any]="<pad>" , _lowerCAmelCase : List[str]="<mask>" , _lowerCAmelCase : Optional[Any]=False , **_lowerCAmelCase : Union[str, Any] , ):
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else bos_token
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else eos_token
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else sep_token
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else cls_token
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else unk_token
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
errors=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , )
with open(_lowerCAmelCase , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE_ = json.load(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCAmelCase , encoding='utf-8' ) as merges_handle:
SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n' )[1:-1]
SCREAMING_SNAKE_CASE_ = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase_ ( self : int ):
return len(self.encoder )
def lowerCAmelCase_ ( self : Optional[int] ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[int] ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ = tuple(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 0
while i < len(_lowerCAmelCase ):
try:
SCREAMING_SNAKE_CASE_ = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ = tuple(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ = get_pairs(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = ' '.join(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = word
return word
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ = []
for token in re.findall(self.pat , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCAmelCase ).split(' ' ) )
return bpe_tokens
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Union[str, Any] ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Optional[Any] ):
return self.decoder.get(_lowerCAmelCase )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_ = ''.join(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + '\n' )
SCREAMING_SNAKE_CASE_ = 0
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
SCREAMING_SNAKE_CASE_ = token_index
writer.write(' '.join(_lowerCAmelCase ) + '\n' )
index += 1
return vocab_file, merge_file
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None , _lowerCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCAmelCase )) + [1]
return [1] + ([0] * len(_lowerCAmelCase )) + [1, 1] + ([0] * len(_lowerCAmelCase )) + [1]
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any=False , **_lowerCAmelCase : Any ):
SCREAMING_SNAKE_CASE_ = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCAmelCase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ = ' ' + text
return (text, kwargs)
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE_ = super()._pad(
encoded_inputs=_lowerCAmelCase , max_length=_lowerCAmelCase , padding_strategy=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE_ = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE_ = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE_ = len(encoded_inputs['global_attention_mask'] ) != len(_lowerCAmelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE_ = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE_ = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
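# --- Illustrative note (editor addition) ---
# The `_pad` override above pads `global_attention_mask` alongside the usual
# model inputs, using -1 rather than 0 as the pad value because for LED a 0
# already means "local attention". With right padding, length 4 -> 6:
#
#     global_attention_mask: [1, 0, 0, 0]  ->  [1, 0, 0, 0, -1, -1]
#
# The leading 1 keeps global attention on the first token, and the appended -1
# positions are ignored as padding.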
def UpperCAmelCase_ ( __UpperCAmelCase : list , __UpperCAmelCase : int , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = 0 ) -> int:
SCREAMING_SNAKE_CASE_ = right or len(__UpperCAmelCase ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(__UpperCAmelCase , __UpperCAmelCase , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
    doctest.testmod()
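# --- Illustrative sketch (editor addition) ---
# Despite the generic name, the function above is a two-ended *recursive linear*
# search: each call only inspects the current left and right boundaries before
# recursing on the narrowed interval, so it runs in O(n), not O(log n). Using
# the internal name `search` from the recursive call:
#
#     search([1, 3, 5, 7, 9], 7)  # -> 3
#     search([1, 3, 5, 7, 9], 4)  # -> -1 (not found)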
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["image_processor", "tokenizer"]
lowercase_ = "BlipImageProcessor"
lowercase_ = "AutoTokenizer"
def __init__( self : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_ = False
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.image_processor
def __call__( self : Dict , _lowerCAmelCase : ImageInput = None , _lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , _lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : int = 0 , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , **_lowerCAmelCase : Any , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
SCREAMING_SNAKE_CASE_ = self.tokenizer
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
return text_encoding
# add pixel_values
SCREAMING_SNAKE_CASE_ = self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase )
if text is not None:
SCREAMING_SNAKE_CASE_ = self.tokenizer(
text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , stride=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_overflowing_tokens=_lowerCAmelCase , return_special_tokens_mask=_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , return_length=_lowerCAmelCase , verbose=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase , )
else:
SCREAMING_SNAKE_CASE_ = None
if text_encoding is not None:
encoding_image_processor.update(_lowerCAmelCase )
return encoding_image_processor
def lowerCAmelCase_ ( self : int , *_lowerCAmelCase : int , **_lowerCAmelCase : Optional[int] ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Optional[Any] ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
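# --- Illustrative sketch (editor addition) ---
# Typical call pattern for the processor above (a BLIP-2-style wrapper pairing
# `BlipImageProcessor` with an auto tokenizer); the checkpoint name below is a
# hypothetical placeholder:
#
#     processor = Blip2Processor.from_pretrained("some/blip2-checkpoint")
#     inputs = processor(images=image, text="Describe the image.", return_tensors="pt")
#     # -> `pixel_values` from the image processor, with `input_ids` and
#     #    `attention_mask` from the tokenizer merged in, exactly as the
#     #    `__call__` above implements.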
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ : List[str] = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ : Optional[Any] = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
lowerCamelCase__ : List[Any] = '▁'
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "token_type_ids"]
lowercase_ = FNetTokenizer
def __init__( self : List[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[Any]="<unk>" , _lowerCAmelCase : Optional[Any]="[SEP]" , _lowerCAmelCase : Optional[Any]="<pad>" , _lowerCAmelCase : Optional[int]="[CLS]" , _lowerCAmelCase : Optional[Any]="[MASK]" , **_lowerCAmelCase : Any , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE_ = (
AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase , normalized=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else mask_token
)
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
        return (out_vocab_file,)
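# --- Illustrative note (editor addition) ---
# The two helper methods above define FNet's special-token layout and segment
# ids:
#
#     single sequence: [CLS] A [SEP]            token_type_ids: all 0
#     sequence pair:   [CLS] A [SEP] B [SEP]    token_type_ids: 0s for the first
#                                               segment, 1s for the second
#
# e.g. for token-id lists a = [10, 11] and b = [20]:
#     build_inputs_with_special_tokens(a, b)      -> [cls, 10, 11, sep, 20, sep]
#     create_token_type_ids_from_sequences(a, b)  -> [0, 0, 0, 0, 1, 1]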
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : int ):
if dst_width < 0 or dst_height < 0:
raise ValueError('Destination width/height should be > 0' )
SCREAMING_SNAKE_CASE_ = img
SCREAMING_SNAKE_CASE_ = img.shape[1]
SCREAMING_SNAKE_CASE_ = img.shape[0]
SCREAMING_SNAKE_CASE_ = dst_width
SCREAMING_SNAKE_CASE_ = dst_height
SCREAMING_SNAKE_CASE_ = self.src_w / self.dst_w
SCREAMING_SNAKE_CASE_ = self.src_h / self.dst_h
SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def lowerCAmelCase_ ( self : Optional[int] ):
for i in range(self.dst_h ):
for j in range(self.dst_w ):
SCREAMING_SNAKE_CASE_ = self.img[self.get_y(_lowerCAmelCase )][self.get_x(_lowerCAmelCase )]
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : int ):
return int(self.ratio_x * x )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : int ):
return int(self.ratio_y * y )
if __name__ == "__main__":
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = 800, 600
lowerCamelCase__ : Any = imread('image_data/lena.jpg', 1)
lowerCamelCase__ : List[str] = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
    destroyAllWindows()
from __future__ import annotations
from collections.abc import Generator
def UpperCAmelCase_ ( ) -> Generator[int, None, None]:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 2
while True:
SCREAMING_SNAKE_CASE_ = factor_map.pop(__UpperCAmelCase , __UpperCAmelCase )
if factor:
SCREAMING_SNAKE_CASE_ = factor + prime
while x in factor_map:
x += factor
SCREAMING_SNAKE_CASE_ = factor
else:
SCREAMING_SNAKE_CASE_ = prime
yield prime
prime += 1
def UpperCAmelCase_ ( __UpperCAmelCase : float = 1E10 ) -> int:
SCREAMING_SNAKE_CASE_ = sieve()
SCREAMING_SNAKE_CASE_ = 1
while True:
SCREAMING_SNAKE_CASE_ = next(__UpperCAmelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(__UpperCAmelCase )
n += 2
if __name__ == "__main__":
    print(solution())
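# --- Illustrative sketch (editor addition) ---
# The generator above is an incremental (unbounded) Sieve of Eratosthenes: each
# composite keeps one known prime factor in `factor_map`, and that factor is
# slid forward past values already claimed, so memory grows only with the
# primes seen so far. Mirroring the internal name `sieve`:
#
#     gen = sieve()
#     [next(gen) for _ in range(8)]  # -> [2, 3, 5, 7, 11, 13, 17, 19]
#
# `solution` then walks primes at odd indices n = 1, 3, 5, ... and returns the
# first n where 2 * p_n * n exceeds the limit (the prime-square-remainder
# bound, apparently Project Euler problem 123).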
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
lowerCamelCase__ : Optional[int] = trt.Logger(trt.Logger.WARNING)
lowerCamelCase__ : Tuple = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase__ : str = logging.getLogger(__name__)
lowerCamelCase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
lowerCamelCase__ : Tuple = parser.parse_args()
if args.tokenizer_name:
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
lowerCamelCase__ : Dict = args.per_device_eval_batch_size
lowerCamelCase__ : Dict = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowerCamelCase__ : Dict = True
lowerCamelCase__ : Union[str, Any] = 'temp_engine/bert-fp32.engine'
if args.fpaa:
lowerCamelCase__ : Any = 'temp_engine/bert-fp16.engine'
if args.inta:
lowerCamelCase__ : Dict = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
lowerCamelCase__ : Dict = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
lowerCamelCase__ : Optional[Any] = [network.get_input(i) for i in range(network.num_inputs)]
lowerCamelCase__ : Tuple = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
lowerCamelCase__ : List[Any] = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
lowerCamelCase__ : Optional[Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
lowerCamelCase__ : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = np.asarray(inputs['input_ids'] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ = np.asarray(inputs['attention_mask'] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ = np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , __UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __UpperCAmelCase )
# start time
SCREAMING_SNAKE_CASE_ = time.time()
# Run inference
context.execute_async(
bindings=[int(__UpperCAmelCase ) for d_inp in d_inputs] + [int(__UpperCAmelCase ), int(__UpperCAmelCase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
cuda.memcpy_dtoh_async(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
SCREAMING_SNAKE_CASE_ = time.time()
SCREAMING_SNAKE_CASE_ = end_time - start_time
SCREAMING_SNAKE_CASE_ = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
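# --- Illustrative note (editor addition) ---
# The function above follows the canonical PyCUDA + TensorRT async pattern:
#
#     1. cuda.memcpy_htod_async(...)  -- stage all three inputs on the device
#     2. context.execute_async(...)   -- enqueue inference on the same stream
#     3. cuda.memcpy_dtoh_async(...)  -- queue the logits copies back to host
#     4. stream.synchronize()         -- block once, then read both host buffers
#
# Because every call is enqueued on a single stream, the one synchronize() at
# the end makes the measured `infer_time` cover the full device round trip.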
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase__ : Optional[int] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase__ : Optional[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
lowerCamelCase__ : Optional[int] = raw_datasets['validation'].column_names
lowerCamelCase__ : Tuple = 'question' if 'question' in column_names else column_names[0]
lowerCamelCase__ : str = 'context' if 'context' in column_names else column_names[1]
lowerCamelCase__ : Union[str, Any] = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase__ : str = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
lowerCamelCase__ : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Optional[Any]:
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
SCREAMING_SNAKE_CASE_ = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
SCREAMING_SNAKE_CASE_ = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=__UpperCAmelCase , stride=args.doc_stride , return_overflowing_tokens=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
SCREAMING_SNAKE_CASE_ = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
SCREAMING_SNAKE_CASE_ = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
SCREAMING_SNAKE_CASE_ = tokenized_examples.sequence_ids(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
SCREAMING_SNAKE_CASE_ = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
        # Set the offset_mapping entries that are not part of the context to None, so it's easy to determine
        # whether a token position is part of the context or not.
SCREAMING_SNAKE_CASE_ = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
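# --- Illustrative note (editor addition) ---
# A concrete picture of the stride/overflow behaviour described above: with
# max_seq_length=384 and doc_stride=128, a question+context pair whose
# tokenization runs to roughly 600 tokens becomes two features, the second one
# starting 128 context tokens before where the first one ended, so the windows
# overlap. `overflow_to_sample_mapping` would then read [0, 0], which is how
# both features are traced back to example 0 during post-processing.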
lowerCamelCase__ : Optional[Any] = raw_datasets['validation']
# Validation Feature Creation
lowerCamelCase__ : Union[str, Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
lowerCamelCase__ : Any = default_data_collator
lowerCamelCase__ : str = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
lowerCamelCase__ : Optional[Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple="eval" ) -> Tuple:
# Post-processing: we match the start logits and end logits to answers in the original context.
SCREAMING_SNAKE_CASE_ = postprocess_qa_predictions(
examples=__UpperCAmelCase , features=__UpperCAmelCase , predictions=__UpperCAmelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=__UpperCAmelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
SCREAMING_SNAKE_CASE_ = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
SCREAMING_SNAKE_CASE_ = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
SCREAMING_SNAKE_CASE_ = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=__UpperCAmelCase , label_ids=__UpperCAmelCase )
lowerCamelCase__ : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Union[str, Any]:
return trt.volume(engine.get_binding_shape(__UpperCAmelCase ) ) * engine.get_binding_dtype(__UpperCAmelCase ).itemsize
# Allocate device memory for inputs and outputs.
lowerCamelCase__ : Union[str, Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
lowerCamelCase__ : List[str] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
lowerCamelCase__ : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
lowerCamelCase__ : Any = cuda.mem_alloc(h_outputa.nbytes)
lowerCamelCase__ : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
lowerCamelCase__ : Union[str, Any] = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
lowerCamelCase__ : Optional[int] = 0.0
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : Union[str, Any] = timeit.default_timer()
lowerCamelCase__ : int = None
for step, batch in enumerate(eval_dataloader):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
lowerCamelCase__ , lowerCamelCase__ : Dict = outputs
lowerCamelCase__ : Union[str, Any] = torch.tensor(start_logits)
lowerCamelCase__ : int = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
lowerCamelCase__ : Union[str, Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
lowerCamelCase__ : str = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
lowerCamelCase__ : Any = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
lowerCamelCase__ : Any = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
lowerCamelCase__ : int = nested_truncate(all_preds, len(eval_dataset))
lowerCamelCase__ : Dict = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
lowerCamelCase__ : Any = post_processing_function(eval_examples, eval_dataset, all_preds)
lowerCamelCase__ : int = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'''Evaluation metrics: {eval_metric}''')
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=64 , _lowerCAmelCase : List[str]=None ):
SCREAMING_SNAKE_CASE_ = np.random.default_rng(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = length
SCREAMING_SNAKE_CASE_ = rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Optional[int] ):
return self.length
def __getitem__( self : str , _lowerCAmelCase : Union[str, Any] ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : str=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_ = True
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any]=None ):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE_ = False
return x * self.a[0] + self.b[0]
class lowerCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Optional[Any]=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_ = True
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[int]=None ):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE_ = False
return x * self.a + self.b
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : int = 16 ) -> Union[str, Any]:
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE_ = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
SCREAMING_SNAKE_CASE_ = load_dataset('csv' , data_files=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = datasets['train'].unique('label' )
SCREAMING_SNAKE_CASE_ = {v: i for i, v in enumerate(__UpperCAmelCase )}
def tokenize_function(__UpperCAmelCase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' )
if "label" in examples:
SCREAMING_SNAKE_CASE_ = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_ = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(__UpperCAmelCase : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCAmelCase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(__UpperCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['train'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=2 )
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['validation'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=1 )
    return train_dataloader, eval_dataloader
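# --- Illustrative sketch (editor addition) ---
# How these helpers are typically wired together in an Accelerate test (names
# follow the upstream test utilities, `RegressionDataset` / `RegressionModel` /
# `get_dataloaders`; the `accelerator` referenced inside `collate_fn` is a
# module-level global in the real file):
#
#     accelerator = Accelerator()
#     train_dl, eval_dl = get_dataloaders(accelerator)
#     model = RegressionModel()
#     model, train_dl, eval_dl = accelerator.prepare(model, train_dl, eval_dl)
#
# `RegressionDataset` produces y = a*x + b + noise, so the two tiny models
# above only have to recover the scalars `a` and `b`.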
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] ) -> List[Any]:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ).read()
_check_text_dataset(__UpperCAmelCase , __UpperCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def UpperCAmelCase_ ( __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] ) -> Tuple:
SCREAMING_SNAKE_CASE_ = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ = {'text': 'string'}
SCREAMING_SNAKE_CASE_ = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ = (
Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ = TextDatasetReader(__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_text_dataset(__UpperCAmelCase , __UpperCAmelCase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ = {'text': 'string'}
SCREAMING_SNAKE_CASE_ = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase , split=__UpperCAmelCase ).read()
_check_text_dataset(__UpperCAmelCase , __UpperCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] ) -> List[str]:
if issubclass(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = text_path
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = [text_path]
SCREAMING_SNAKE_CASE_ = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ = {'text': 'string'}
SCREAMING_SNAKE_CASE_ = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_text_dataset(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple=("train",) ) -> Dict:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE_ = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ = TextDatasetReader({'train': text_path} , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ).read()
_check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def UpperCAmelCase_ ( __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = tmp_path / 'cache'
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
SCREAMING_SNAKE_CASE_ = {'text': 'string'}
SCREAMING_SNAKE_CASE_ = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ = (
Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ = TextDatasetReader({'train': text_path} , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : List[str] ) -> Union[str, Any]:
if split:
SCREAMING_SNAKE_CASE_ = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ = 'train'
SCREAMING_SNAKE_CASE_ = {'train': text_path, 'test': text_path}
SCREAMING_SNAKE_CASE_ = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ = {'text': 'string'}
SCREAMING_SNAKE_CASE_ = TextDatasetReader(__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_text_datasetdict(__UpperCAmelCase , __UpperCAmelCase , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ):
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , _lowerCAmelCase , )
        super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : List[Any] = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Any = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 31 |
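# The mbart __init__ above relies on transformers' lazy-import machinery:
# exported names are declared up front and only imported on first attribute
# access. A stripped-down sketch of the idea (not the real _LazyModule):
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module('.' + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)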
def UpperCAmelCase_ ( ) -> list[list[int]]:
return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
lowerCamelCase__ : List[Any] = generate_large_matrix()
lowerCamelCase__ : List[Any] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> None:
assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid )
assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) )
def UpperCAmelCase_ ( __UpperCAmelCase : list[int] ) -> int:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
SCREAMING_SNAKE_CASE_ = (left + right) // 2
SCREAMING_SNAKE_CASE_ = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
SCREAMING_SNAKE_CASE_ = mid + 1
else:
SCREAMING_SNAKE_CASE_ = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> int:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(grid[0] )
for i in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ = find_negative_index(grid[i][:bound] )
total += bound
return (len(__UpperCAmelCase ) * len(grid[0] )) - total
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> int:
return len([number for row in grid for number in row if number < 0] )
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> int:
SCREAMING_SNAKE_CASE_ = 0
for row in grid:
for i, number in enumerate(__UpperCAmelCase ):
if number < 0:
total += len(__UpperCAmelCase ) - i
break
return total
def UpperCAmelCase_ ( ) -> None:
from timeit import timeit
print('Running benchmarks' )
SCREAMING_SNAKE_CASE_ = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
SCREAMING_SNAKE_CASE_ = timeit(f"{func}(grid=grid)" , setup=__UpperCAmelCase , number=5_00 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 31 | 1 |
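# Deobfuscated sketch of the binary-search counter benchmarked above; the
# function names are reconstructed from the benchmark strings in the file:
def find_negative_index(array: list[int]) -> int:
    # Return the index of the first negative value in a non-increasing row,
    # i.e. the number of non-negative entries.
    left, right = 0, len(array) - 1
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        if array[mid] < 0 and array[mid - 1] >= 0:
            return mid
        if array[mid] >= 0:
            left = mid + 1
        else:
            right = mid - 1
    return len(array)

def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for row in grid:
        # columns are sorted too, so the non-negative prefix can only shrink
        bound = find_negative_index(row[:bound])
        total += bound
    return len(grid) * len(grid[0]) - total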
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ : Optional[Any] = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 42
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : PriorTransformer , _lowerCAmelCase : CLIPVisionModel , _lowerCAmelCase : CLIPImageProcessor , _lowerCAmelCase : HeunDiscreteScheduler , _lowerCAmelCase : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=_lowerCAmelCase , image_encoder=_lowerCAmelCase , image_processor=_lowerCAmelCase , scheduler=_lowerCAmelCase , renderer=_lowerCAmelCase , )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] ):
if latents is None:
SCREAMING_SNAKE_CASE_ = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
SCREAMING_SNAKE_CASE_ = latents.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE_ = torch.device(F"cuda:{gpu_id}" )
SCREAMING_SNAKE_CASE_ = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : int ):
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_lowerCAmelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : List[str] , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(_lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_lowerCAmelCase , axis=0 )
if not isinstance(_lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE_ = self.image_processor(_lowerCAmelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = image.to(dtype=self.image_encoder.dtype , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.image_encoder(_lowerCAmelCase )['last_hidden_state']
SCREAMING_SNAKE_CASE_ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
SCREAMING_SNAKE_CASE_ = image_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ = torch.zeros_like(_lowerCAmelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE_ = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_lowerCAmelCase )
def __call__( self : Tuple , _lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 25 , _lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase : Optional[torch.FloatTensor] = None , _lowerCAmelCase : float = 4.0 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , ):
if isinstance(_lowerCAmelCase , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = 1
elif isinstance(_lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE_ = image.shape[0]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase )
else:
raise ValueError(
F"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_lowerCAmelCase )}" )
SCREAMING_SNAKE_CASE_ = self._execution_device
SCREAMING_SNAKE_CASE_ = batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE_ = guidance_scale > 1.0
SCREAMING_SNAKE_CASE_ = self._encode_image(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# prior
self.scheduler.set_timesteps(_lowerCAmelCase , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps
SCREAMING_SNAKE_CASE_ = self.prior.config.num_embeddings
SCREAMING_SNAKE_CASE_ = self.prior.config.embedding_dim
SCREAMING_SNAKE_CASE_ = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
SCREAMING_SNAKE_CASE_ = latents.reshape(latents.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.prior(
_lowerCAmelCase , timestep=_lowerCAmelCase , proj_embedding=_lowerCAmelCase , ).predicted_image_embedding
# remove the variance
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
SCREAMING_SNAKE_CASE_ = self.scheduler.step(
_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = []
for i, latent in enumerate(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = self.renderer.decode(
latent[None, :] , _lowerCAmelCase , size=_lowerCAmelCase , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.stack(_lowerCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(F"Only the output types `pil` and `np` are supported not output_type={output_type}" )
SCREAMING_SNAKE_CASE_ = images.cpu().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = [self.numpy_to_pil(_lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_lowerCAmelCase ) | 31 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Optional[int] = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Any = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
lowerCamelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 31 | 1 |
def solution(n: int = 4_00_00_00) -> int:
    # Sum the even-valued Fibonacci terms that do not exceed n; the name
    # `solution` is required by the `print(f'{solution() = }')` call below.
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(f'''{solution() = }''') | 31 |
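# Quick sanity check for the helper above: the even Fibonacci numbers up to
# 100 are 2, 8 and 34, so their sum is 44.
assert solution(100) == 44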
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return self.get_dummy_input()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : str=False , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Dict=False , ):
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 32
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = torch.device(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (batch_size, num_channels) + sizes
SCREAMING_SNAKE_CASE_ = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {'hidden_states': hidden_states}
if include_temb:
SCREAMING_SNAKE_CASE_ = 128
SCREAMING_SNAKE_CASE_ = randn_tensor((batch_size, temb_channels) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
if include_res_hidden_states_tuple:
SCREAMING_SNAKE_CASE_ = torch.manual_seed(1 )
SCREAMING_SNAKE_CASE_ = (randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase ),)
if include_encoder_hidden_states:
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, 32, 32) ).to(_lowerCAmelCase )
if include_skip_sample:
SCREAMING_SNAKE_CASE_ = randn_tensor(((batch_size, 3) + sizes) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
return dummy_input
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
SCREAMING_SNAKE_CASE_ = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = self.block_class(**_lowerCAmelCase )
unet_block.to(_lowerCAmelCase )
unet_block.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = unet_block(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = output[0]
self.assertEqual(output.shape , self.output_shape )
SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:]
SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
assert torch_all_close(output_slice.flatten() , _lowerCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = self.block_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = output[0]
SCREAMING_SNAKE_CASE_ = torch.device(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = randn_tensor(output.shape , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(_lowerCAmelCase , _lowerCAmelCase )
loss.backward() | 31 | 1 |
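# The mixin above is parametrized by `block_class` / `block_type`; a concrete
# test case only pins those down. A hypothetical subclass (DownBlock2D is a
# real diffusers block; the import path may vary across diffusers versions):
from diffusers.models.unet_2d_blocks import DownBlock2D

class DownBlockaDBlockTests(lowerCamelCase_, unittest.TestCase):
    block_class = DownBlock2D
    block_type = 'down'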
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_lowerCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_lowerCAmelCase , 'num_attention_heads' ) )
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : List[Any]=13 , _lowerCAmelCase : str=32 , _lowerCAmelCase : str=2 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : List[str]=640 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : Dict="silu" , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : str=32 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Any=10 , _lowerCAmelCase : str=None , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = last_hidden_size
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = conv_kernel_size
SCREAMING_SNAKE_CASE_ = output_stride
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = classifier_dropout_prob
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = scope
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase_ ( self : Optional[int] ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = MobileViTModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = MobileViTForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = MobileViTForSemanticSegmentation(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase_ = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE_ = MobileViTConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowerCAmelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowerCAmelCase_ ( self : Dict ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowerCAmelCase_ ( self : List[str] ):
pass
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCAmelCase_ ( self : int ):
pass
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = outputs.hidden_states
SCREAMING_SNAKE_CASE_ = 5
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE_ = 2
for i in range(len(_lowerCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Dict ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = MobileViTModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def UpperCAmelCase_ ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : List[Any] ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE_ = model.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_lowerCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE_ = model.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase , target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE_ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _lowerCAmelCase ) | 31 |
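# Minimal inference sketch matching the integration tests above (checkpoint
# name taken from the tests; the rest is standard transformers usage):
import torch
from PIL import Image
from transformers import MobileViTForImageClassification, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small')
model = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small')
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])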
import operator as op
def solve(post_fix: list) -> int:
    # identifier names reconstructed from their uses below (`solve` and
    # `Postfix` are required by the __main__ block).
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation
    # print table header
    print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep=' | ')
    print('-' * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ('push(' + x + ')').ljust(12), ','.join(stack), sep=' | ')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + b + ')').ljust(12), ','.join(stack), sep=' | ')
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + a + ')').ljust(12), ','.join(stack), sep=' | ')
            # evaluate the two popped values & push the result to the stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ('push(' + a + x + b + ')').ljust(12), ','.join(stack), sep=' | ')
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix)) | 31 | 1 |
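# Worked example for the evaluator above: "5 6 9 * +" in postfix is
# 5 + (6 * 9), so the result is 59.
assert solve('5 6 9 * +'.split(' ')) == 59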
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int=13 , _lowerCAmelCase : str=7 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : str=99 , _lowerCAmelCase : List[Any]=32 , _lowerCAmelCase : int=5 , _lowerCAmelCase : Tuple=4 , _lowerCAmelCase : Any=37 , _lowerCAmelCase : Optional[Any]="gelu" , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : List[str]=512 , _lowerCAmelCase : Tuple=16 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : int=0.02 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=4 , _lowerCAmelCase : List[str]=None , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = num_choices
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = self.vocab_size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , *_lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = OpenAIGPTModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , head_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , *_lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ = OpenAIGPTLMHeadModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , *_lowerCAmelCase : Any ):
SCREAMING_SNAKE_CASE_ = OpenAIGPTDoubleHeadsModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , *_lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = OpenAIGPTForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE_ = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase_ = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int]=False ):
SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
SCREAMING_SNAKE_CASE_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = inputs_dict['labels']
SCREAMING_SNAKE_CASE_ = inputs_dict['labels']
SCREAMING_SNAKE_CASE_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
return inputs_dict
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = OpenAIGPTModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCAmelCase , n_embd=37 )
def lowerCAmelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Tuple ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = OpenAIGPTModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=_lowerCAmelCase ) # the president is
SCREAMING_SNAKE_CASE_ = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist() , _lowerCAmelCase ) | 31 |
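# The slow test above greedily continues "the president is". The same flow
# outside the test harness, using the standard transformers generation API:
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
input_ids = tokenizer('the president is', return_tensors='pt').input_ids
output_ids = model.generate(input_ids, do_sample=False)
print(tokenizer.decode(output_ids[0]))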
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''') | 31 | 1 |
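# Sanity check for the recurrence above: Sylvester's sequence starts
# 2, 3, 7, 43, with each later term a_n = a_{n-1} * (a_{n-1} - 1) + 1.
assert [sylvester(i) for i in range(1, 5)] == [2, 3, 7, 43]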
import argparse
from collections import defaultdict
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Any ) -> Dict:
SCREAMING_SNAKE_CASE_ = f"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(__UpperCAmelCase , 'r' ) as f:
SCREAMING_SNAKE_CASE_ = f.readlines()
SCREAMING_SNAKE_CASE_ = f"class {class_name}("
SCREAMING_SNAKE_CASE_ = f"{4 * ' '}def {test_name}("
SCREAMING_SNAKE_CASE_ = f"{8 * ' '}{correct_line.split()[0]}"
SCREAMING_SNAKE_CASE_ = f"{16 * ' '}{correct_line.split()[0]}"
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = []
for line in lines:
if line.startswith(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = True
elif in_class and line.startswith(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = True
elif in_class and in_func and (line.startswith(__UpperCAmelCase ) or line.startswith(__UpperCAmelCase )):
SCREAMING_SNAKE_CASE_ = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
SCREAMING_SNAKE_CASE_ = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
SCREAMING_SNAKE_CASE_ = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"{spaces * ' '}{correct_line}" )
SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = False
else:
new_lines.append(__UpperCAmelCase )
with open(__UpperCAmelCase , 'w' ) as f:
for line in new_lines:
f.write(__UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : Any , __UpperCAmelCase : str=None ) -> Tuple:
if fail is not None:
with open(__UpperCAmelCase , 'r' ) as f:
SCREAMING_SNAKE_CASE_ = {l.strip() for l in f.readlines()}
else:
SCREAMING_SNAKE_CASE_ = None
with open(__UpperCAmelCase , 'r' ) as f:
SCREAMING_SNAKE_CASE_ = f.readlines()
SCREAMING_SNAKE_CASE_ = defaultdict(__UpperCAmelCase )
for line in correct_lines:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
lowerCamelCase__ : int = parser.parse_args()
main(args.correct_filename, args.fail_filename) | 31 |
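# Example invocation of the script above; each line of the expected-results
# file is `path;ClassName;test_name;corrected source line`, matching the
# `line.split(';')` in main (the script file name here is illustrative):
#   python overwrite_expected_slice.py --correct_filename expected.txt --fail_filename failures.txt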
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCamelCase__ : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCamelCase__ : Union[str, Any] = TaTokenizerFast
lowerCamelCase__ : Dict = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCamelCase__ : int = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
) | 31 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "swinv2"
lowercase_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Dict , _lowerCAmelCase : Optional[Any]=224 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Tuple=96 , _lowerCAmelCase : Dict=[2, 2, 6, 2] , _lowerCAmelCase : Optional[Any]=[3, 6, 12, 24] , _lowerCAmelCase : str=7 , _lowerCAmelCase : List[Any]=4.0 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : str=False , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : List[Any]=1E-5 , _lowerCAmelCase : str=32 , **_lowerCAmelCase : List[Any] , ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = embed_dim
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = num_heads
SCREAMING_SNAKE_CASE_ = window_size
SCREAMING_SNAKE_CASE_ = mlp_ratio
SCREAMING_SNAKE_CASE_ = qkv_bias
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = use_absolute_embeddings
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE_ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
SCREAMING_SNAKE_CASE_ = (0, 0, 0, 0) | 31 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch
def lowerCAmelCase_ ( self : int ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : Tuple ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import AutoModel\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() ) | 31 | 1 |
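# The tests above flip TRANSFORMERS_OFFLINE in a child process because the
# variable is read when `transformers` is imported. The same pattern in a
# plain script (the model must already be in the local cache):
import os

os.environ['TRANSFORMERS_OFFLINE'] = '1'  # set before importing transformers

from transformers import pipeline

pipe = pipeline(task='fill-mask', model='hf-internal-testing/tiny-random-bert')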
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCamelCase_ :
'''simple docstring'''
lowercase_ = None
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = os.path.join(_lowerCAmelCase , 'feat_extract.json' )
feat_extract_first.to_json_file(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class.from_json_file(_lowerCAmelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = feat_extract_first.save_pretrained(_lowerCAmelCase )[0]
check_json_file_has_correct_format(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class.from_pretrained(_lowerCAmelCase )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class()
self.assertIsNotNone(_lowerCAmelCase ) | 31 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "M-CLIP"
def __init__( self : Tuple , _lowerCAmelCase : List[str]=1_024 , _lowerCAmelCase : str=768 , **_lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = transformerDimSize
SCREAMING_SNAKE_CASE_ = imageDimSize
super().__init__(**_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = MCLIPConfig
def __init__( self : Dict , _lowerCAmelCase : Union[str, Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : str ):
super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = XLMRobertaModel(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.transformer(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(_lowerCAmelCase ), embs
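# --- Illustrative sketch (hypothetical demo, not part of the model above) ---
# The forward pass above mean-pools token embeddings under the attention mask:
# padded positions are zeroed before the sum, and each row is divided by its
# count of real tokens. A minimal standalone version of that pooling step:
def _masked_mean_pool_demo():
    embs = torch.ones(2, 4, 3)  # (batch, seq_len, hidden)
    attention_mask = torch.tensor([[1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0]])
    pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
    # padding never contributes, so every pooled row is still all ones
    assert torch.allclose(pooled, torch.ones(2, 3))
    return pooled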
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase__ : List[str] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str=None , __UpperCAmelCase : str=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Optional[Any]=None , ) -> Tuple:
if attention_mask is None:
SCREAMING_SNAKE_CASE_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
SCREAMING_SNAKE_CASE_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
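# --- Illustrative sketch (hypothetical demo, not part of the test file) ---
# The helper above derives attention masks straight from the pad token id via
# `np.where`. A tiny standalone example of that pattern, assuming pad id 1:
def _attention_mask_from_pad_demo(pad_token_id=1):
    input_ids = np.array([[5, 7, pad_token_id, pad_token_id]])
    attention_mask = np.where(input_ids != pad_token_id, 1, 0)
    assert attention_mask.tolist() == [[1, 1, 0, 0]]
    return attention_mask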
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : int=13 , _lowerCAmelCase : str=7 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Dict=99 , _lowerCAmelCase : Tuple=16 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : int=4 , _lowerCAmelCase : int=4 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Tuple=32 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Optional[int]=1 , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : Any=0.02 , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = eos_token_id
SCREAMING_SNAKE_CASE_ = pad_token_id
SCREAMING_SNAKE_CASE_ = bos_token_id
SCREAMING_SNAKE_CASE_ = initializer_range
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
SCREAMING_SNAKE_CASE_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
SCREAMING_SNAKE_CASE_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ):
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = model_class_name(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model.encode(inputs_dict['input_ids'] )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
SCREAMING_SNAKE_CASE_ = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
SCREAMING_SNAKE_CASE_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE_ = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
SCREAMING_SNAKE_CASE_ = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = model.decode(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = model_class_name(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model.encode(inputs_dict['input_ids'] )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
SCREAMING_SNAKE_CASE_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
SCREAMING_SNAKE_CASE_ = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE_ = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
SCREAMING_SNAKE_CASE_ = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}" )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
lowercase_ = 99
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
SCREAMING_SNAKE_CASE_ = input_ids.shape[0]
SCREAMING_SNAKE_CASE_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._get_config_and_data()
SCREAMING_SNAKE_CASE_ = FlaxBlenderbotForConditionalGeneration(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = lm_model(input_ids=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
SCREAMING_SNAKE_CASE_ = FlaxBlenderbotForConditionalGeneration(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
SCREAMING_SNAKE_CASE_ = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
SCREAMING_SNAKE_CASE_ = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = True
lowercase_ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowercase_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = FlaxBlenderbotModelTester(self )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(_lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=None , **_lowerCAmelCase : str ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE_ = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ = model_class(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
SCREAMING_SNAKE_CASE_ = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE_ = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
SCREAMING_SNAKE_CASE_ = np.ones((1, 1) ) * model.config.eos_token_id
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
SCREAMING_SNAKE_CASE_ = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
SCREAMING_SNAKE_CASE_ = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
SCREAMING_SNAKE_CASE_ = ['Sam']
SCREAMING_SNAKE_CASE_ = tokenizer(_lowerCAmelCase , return_tensors='jax' )
SCREAMING_SNAKE_CASE_ = model.generate(**_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'Sam is a great name. It means "sun" in Gaelic.'
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_lowerCAmelCase , **_lowerCAmelCase )
        assert generated_txt[0].strip() == tgt_text
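# --- Illustrative sketch (an assumption about the shift's internals, not the
# library function itself) ---
# `test_shift_tokens_right` above checks three properties: the shape is kept,
# exactly one pad token is consumed by the inserted decoder-start token, and
# column 0 becomes the decoder-start id. A minimal NumPy re-implementation that
# satisfies those assertions:
def _shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]  # move every token one slot to the right
    shifted[:, 0] = decoder_start_token_id  # prepend the decoder start token
    return np.where(shifted == -100, pad_token_id, shifted)  # -100 labels -> pad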
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase )
return image
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def lowerCAmelCase_ ( self : Tuple ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def lowerCAmelCase_ ( self : Optional[int] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(_lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : List[Any] ):
def extract(*_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str ):
SCREAMING_SNAKE_CASE_ = torch.ones([0] )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int ):
self.pixel_values.to(_lowerCAmelCase )
return self
return Out()
return extract
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
# put models in fp16
SCREAMING_SNAKE_CASE_ = unet.half()
SCREAMING_SNAKE_CASE_ = vae.half()
SCREAMING_SNAKE_CASE_ = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE_ = init_image.resize((760, 504) )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
SCREAMING_SNAKE_CASE_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE_ = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so compare with a loose max-absolute-error tolerance here
        assert np.abs(expected_image - image ).max() < 1E-2
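# --- Illustrative sketch (hypothetical helper, not part of the tests above) ---
# The fp32 test resizes the init image to 760x504 on purpose: divisible by 8
# but not by 16 or 32, which exercises the VAE padding path. A small helper for
# snapping arbitrary dimensions down to the nearest multiple of the scale factor:
def _snap_to_multiple(width, height, multiple=8):
    return (width // multiple) * multiple, (height // multiple) * multiple

# e.g. _snap_to_multiple(763, 509) == (760, 504)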
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = (KDPMaDiscreteScheduler,)
lowercase_ = 10
def lowerCAmelCase_ ( self : List[Any] , **_lowerCAmelCase : List[Any] ):
SCREAMING_SNAKE_CASE_ = {
'num_train_timesteps': 1_100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_lowerCAmelCase )
return config
def lowerCAmelCase_ ( self : Optional[int] ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def lowerCAmelCase_ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE_ = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ = sample.to(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(_lowerCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1_112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_428_650_170_972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def lowerCAmelCase_ ( self : List[Any] ):
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE_ = sample.to(_lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(_lowerCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def lowerCAmelCase_ ( self : Any ):
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter.to(_lowerCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ = scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = output.prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(_lowerCAmelCase ) )
if str(_lowerCAmelCase ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3
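# --- Illustrative sketch (hypothetical demo; the 0.1 "model" below is a
# stand-in for a real UNet call) ---
# Every test above walks the same sampling loop: scale the current sample for
# the scheduler, predict noise, then step the scheduler. The control flow, minimally:
def _sampling_loop_demo(num_inference_steps=10):
    scheduler = KDPMaDiscreteScheduler(num_train_timesteps=1_100)
    scheduler.set_timesteps(num_inference_steps)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = model_input * 0.1  # dummy noise prediction
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample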
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "longformer"
def __init__( self : Union[str, Any] , _lowerCAmelCase : Union[List[int], int] = 512 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 0 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 30_522 , _lowerCAmelCase : int = 768 , _lowerCAmelCase : int = 12 , _lowerCAmelCase : int = 12 , _lowerCAmelCase : int = 3_072 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 512 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : float = 1E-12 , _lowerCAmelCase : bool = False , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = attention_window
SCREAMING_SNAKE_CASE_ = sep_token_id
SCREAMING_SNAKE_CASE_ = bos_token_id
SCREAMING_SNAKE_CASE_ = eos_token_id
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = onnx_export
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : "PretrainedConfig" , _lowerCAmelCase : str = "default" , _lowerCAmelCase : "List[PatchingSpec]" = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = True
@property
def lowerCAmelCase_ ( self : Any ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE_ = {0: 'batch'}
return outputs
@property
def lowerCAmelCase_ ( self : str ):
return 1E-4
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : "PreTrainedTokenizerBase" , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE_ = torch.zeros_like(inputs['input_ids'] )
# make every second token global
SCREAMING_SNAKE_CASE_ = 1
        return inputs
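# --- Illustrative sketch (hypothetical demo, not part of the ONNX config) ---
# `generate_dummy_inputs` above builds the global attention mask by zero-filling
# a tensor shaped like `input_ids` and flipping every second position to 1. The
# same pattern as a standalone snippet:
def _every_second_token_global_demo():
    import torch

    input_ids = torch.ones(2, 8, dtype=torch.long)
    global_attention_mask = torch.zeros_like(input_ids)
    global_attention_mask[:, ::2] = 1  # mark every second token as global
    return global_attention_mask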
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCamelCase__ : List[str] = None
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ : List[str] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ : Optional[Any] = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
lowerCamelCase__ : Any = '▁'
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = AlbertTokenizer
def __init__( self : Union[str, Any] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : int="[CLS]" , _lowerCAmelCase : Any="[SEP]" , _lowerCAmelCase : List[str]="<unk>" , _lowerCAmelCase : int="[SEP]" , _lowerCAmelCase : Optional[Any]="<pad>" , _lowerCAmelCase : int="[CLS]" , _lowerCAmelCase : str="[MASK]" , **_lowerCAmelCase : Optional[int] , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE_ = (
AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase , normalized=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else mask_token
)
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
        SCREAMING_SNAKE_CASE_ = bool(self.vocab_file )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,) | 31 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : int ):
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.' , FutureWarning , )
        super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
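# --- Illustrative sketch (generic pattern with hypothetical names) ---
# The shim above keeps the old class importable while steering users to its
# replacement: subclass the new class and warn on construction. Generically:
class _NewProcessor:
    pass

class _DeprecatedProcessor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            '_DeprecatedProcessor is deprecated. Please use _NewProcessor instead.' , FutureWarning , )
        super().__init__(*args, **kwargs)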
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCamelCase__ : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCamelCase__ : Union[str, Any] = TaTokenizerFast
lowerCamelCase__ : Dict = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCamelCase__ : int = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
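# --- Illustrative sketch (hypothetical demo of the idea behind `_LazyModule`) ---
# The lazy-module machinery above defers heavy imports until an attribute is
# actually used, so importing the package stays cheap even when optional
# backends are installed. The core idea, reduced to a standalone class:
def _lazy_attr_demo():
    import importlib

    class LazyAttr:
        def __init__(self, module_name, attr_name):
            self._module_name = module_name
            self._attr_name = attr_name
            self._resolved = None

        def resolve(self):
            if self._resolved is None:  # the import happens only on first use
                module = importlib.import_module(self._module_name)
                self._resolved = getattr(module, self._attr_name)
            return self._resolved

    return LazyAttr('json', 'dumps').resolve()({'lazy': True})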
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "swinv2"
lowercase_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Dict , _lowerCAmelCase : Optional[Any]=224 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Tuple=96 , _lowerCAmelCase : Dict=[2, 2, 6, 2] , _lowerCAmelCase : Optional[Any]=[3, 6, 12, 24] , _lowerCAmelCase : str=7 , _lowerCAmelCase : List[Any]=4.0 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : str=False , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : List[Any]=1E-5 , _lowerCAmelCase : str=32 , **_lowerCAmelCase : List[Any] , ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = embed_dim
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = num_heads
SCREAMING_SNAKE_CASE_ = window_size
SCREAMING_SNAKE_CASE_ = mlp_ratio
SCREAMING_SNAKE_CASE_ = qkv_bias
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = use_absolute_embeddings
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE_ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
        SCREAMING_SNAKE_CASE_ = (0, 0, 0, 0)
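# --- Illustrative sketch (arithmetic check with a hypothetical helper name) ---
# The `hidden_size` set above follows the Swin convention: the channel count
# doubles at every stage, so the final stage ends at embed_dim * 2 ** (num_stages - 1).
# With the defaults above (embed_dim=96, four stages): 96 * 2 ** 3 == 768.
def _swin_final_hidden_size(embed_dim=96, num_stages=4):
    return int(embed_dim * 2 ** (num_stages - 1))

assert _swin_final_hidden_size() == 768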
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@slow
@require_torch
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
SCREAMING_SNAKE_CASE_ = BertTokenizer.from_pretrained('bert-base-uncased' )
SCREAMING_SNAKE_CASE_ = bertabert.config.encoder.vocab_size
SCREAMING_SNAKE_CASE_ = tokenizer.sep_token_id
SCREAMING_SNAKE_CASE_ = tokenizer.cls_token_id
SCREAMING_SNAKE_CASE_ = 128
SCREAMING_SNAKE_CASE_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
SCREAMING_SNAKE_CASE_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
SCREAMING_SNAKE_CASE_ = train_dataset.select(range(32 ) )
SCREAMING_SNAKE_CASE_ = val_dataset.select(range(16 ) )
SCREAMING_SNAKE_CASE_ = 4
def _map_to_encoder_decoder_inputs(_lowerCAmelCase : Tuple ):
# Tokenizer will automatically set [BOS] <text> [EOS]
SCREAMING_SNAKE_CASE_ = tokenizer(batch['article'] , padding='max_length' , truncation=_lowerCAmelCase , max_length=512 )
SCREAMING_SNAKE_CASE_ = tokenizer(batch['highlights'] , padding='max_length' , truncation=_lowerCAmelCase , max_length=128 )
SCREAMING_SNAKE_CASE_ = inputs.input_ids
SCREAMING_SNAKE_CASE_ = inputs.attention_mask
SCREAMING_SNAKE_CASE_ = outputs.input_ids
SCREAMING_SNAKE_CASE_ = outputs.input_ids.copy()
SCREAMING_SNAKE_CASE_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
SCREAMING_SNAKE_CASE_ = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = pred.label_ids
SCREAMING_SNAKE_CASE_ = pred.predictions
# all unnecessary tokens are removed
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
            SCREAMING_SNAKE_CASE_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
return {"accuracy": accuracy}
# map train dataset
SCREAMING_SNAKE_CASE_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_lowerCAmelCase , batch_size=_lowerCAmelCase , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
SCREAMING_SNAKE_CASE_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_lowerCAmelCase , batch_size=_lowerCAmelCase , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_ = SeqaSeqTrainingArguments(
output_dir=_lowerCAmelCase , per_device_train_batch_size=_lowerCAmelCase , per_device_eval_batch_size=_lowerCAmelCase , predict_with_generate=_lowerCAmelCase , evaluation_strategy='steps' , do_train=_lowerCAmelCase , do_eval=_lowerCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
SCREAMING_SNAKE_CASE_ = SeqaSeqTrainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , compute_metrics=_compute_metrics , train_dataset=_lowerCAmelCase , eval_dataset=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
# start training
        trainer.train()
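# --- Illustrative sketch (hypothetical helper, not part of the trainer test) ---
# `_map_to_encoder_decoder_inputs` above replaces pad-token ids in the labels
# with -100 so the cross-entropy loss ignores padded positions. The masking
# step in isolation:
def _mask_pad_labels_demo(labels, pad_token_id=0):
    return [[-100 if token == pad_token_id else token for token in row] for row in labels]

# e.g. _mask_pad_labels_demo([[5, 7, 0, 0]]) == [[5, 7, -100, -100]]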
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
lowerCamelCase__ : Dict = random.Random()
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Tuple=1.0 , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Dict=None ) -> Tuple:
if rng is None:
SCREAMING_SNAKE_CASE_ = global_rng
SCREAMING_SNAKE_CASE_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=7 , _lowerCAmelCase : Union[str, Any]=400 , _lowerCAmelCase : Tuple=2_000 , _lowerCAmelCase : str=1 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Optional[Any]=16_000 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=80 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : List[str]=64 , _lowerCAmelCase : List[Any]="hann_window" , _lowerCAmelCase : Any=80 , _lowerCAmelCase : List[Any]=7_600 , _lowerCAmelCase : List[Any]=1E-10 , _lowerCAmelCase : Optional[Any]=True , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = min_seq_length
SCREAMING_SNAKE_CASE_ = max_seq_length
SCREAMING_SNAKE_CASE_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE_ = feature_size
SCREAMING_SNAKE_CASE_ = padding_value
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = num_mel_bins
SCREAMING_SNAKE_CASE_ = hop_length
SCREAMING_SNAKE_CASE_ = win_length
SCREAMING_SNAKE_CASE_ = win_function
SCREAMING_SNAKE_CASE_ = fmin
SCREAMING_SNAKE_CASE_ = fmax
SCREAMING_SNAKE_CASE_ = mel_floor
SCREAMING_SNAKE_CASE_ = return_attention_mask
def lowerCAmelCase_ ( self : Union[str, Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : str=False ):
def _flatten(_lowerCAmelCase : Dict ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Optional[int]=False ):
if equal_length:
SCREAMING_SNAKE_CASE_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = SpeechTaFeatureExtractor
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractionTester(self )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : int ):
self.assertTrue(np.all(np.mean(_lowerCAmelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase_ ( self : List[Any] ):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE_ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ = [None, 1_600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = range(800 , 1_400 , 200 )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ = [None, 1_600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1_000 , padding='max_length' , return_tensors='np' )
        input_values = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1_000 , padding='longest' , return_tensors='np' )
        input_values = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=2_000 , padding='longest' , return_tensors='np' )
        input_values = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def lowerCAmelCase_ ( self : Tuple ):
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
        input_values = feature_extractor(audio_target=speech_inputs , padding=True , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
        input_values_a = feature_extractor(speech_inputs , return_tensors='np' ).input_values
        input_values_b = feature_extractor(np_speech_inputs , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_b in zip(input_values_a , input_values_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        input_values_a = feature_extractor(speech_inputs , return_tensors='np' ).input_values
        input_values_b = feature_extractor(np_speech_inputs , return_tensors='np' ).input_values
        for enc_seq_a, enc_seq_b in zip(input_values_a , input_values_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
        input_np = feat_extract.pad(processed_features , padding='longest' , return_tensors='np' )[input_name]
        input_pt = feat_extract.pad(processed_features , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = min(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_lowerCAmelCase , padding='max_length' , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples( self , num_samples ):
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x['array'] for x in speech_samples]
def lowerCAmelCase_ ( self : Any ):
# fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 93_680) )
        self.assertTrue(torch.allclose(input_values[0, :30] , EXPECTED_INPUT_VALUES , atol=1E-6 ) )
def lowerCAmelCase_ ( self : Optional[int] ):
# fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech , return_tensors='pt' ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
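# A hedged, standalone illustration (added, not part of the tests above) of the property that
# `_check_zero_mean_unit_variance` asserts: after per-utterance normalization the values have
# roughly zero mean and unit variance, e.g.
#
#     signal = np.random.randn(1_000) * 3.2 + 0.7
#     normalized = (signal - signal.mean()) / np.sqrt(signal.var() + 1E-7)
#     assert abs(normalized.mean()) < 1E-3 and abs(normalized.var() - 1) < 1E-3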
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    if not s:
        raise ValueError('The parameter s must not be empty.')
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        'bwt_string': ''.join([word[-1] for word in rotations]),
        'idx_original_string': rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError('The parameter bwt_string type must be str.')
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or castable to int.')
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            'The parameter idx_original_string must be lower than len(bwt_string).')
    ordered_rotations = [''] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
) | 31 | 1 |
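    # Hedged worked example (added, not in the original): for 'banana' the sorted rotations are
    # ['abanan', 'anaban', 'ananab', 'banana', 'nabana', 'nanaba'], so the transform is
    # 'nnbaaa' with the original string at sorted index 3, and reverse_bwt inverts it:
    demo = bwt_transform('banana')
    assert demo == {'bwt_string': 'nnbaaa', 'idx_original_string': 3}
    assert reverse_bwt(demo['bwt_string'], demo['idx_original_string']) == 'banana'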
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase__ : int = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["pixel_values"]
def __init__( self : List[str] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Union[int, float] = 1 / 255 , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : bool = True , **_lowerCAmelCase : Optional[Any] , ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase , param_name='crop_size' )
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = resample
SCREAMING_SNAKE_CASE_ = do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size
SCREAMING_SNAKE_CASE_ = do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_ = do_convert_rgb
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : List[str] , ):
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
SCREAMING_SNAKE_CASE_ = get_resize_output_image_size(_lowerCAmelCase , size=size['shortest_edge'] , default_to_square=_lowerCAmelCase )
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Any , ):
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(_lowerCAmelCase , size=(size['height'], size['width']) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Union[int, float] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Any , ):
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Union[float, List[float]] , _lowerCAmelCase : Union[float, List[float]] , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : List[Any] , ):
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : ImageInput , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : int = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : float = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : Optional[Union[float, List[float]]] = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , _lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **_lowerCAmelCase : Union[str, Any] , ):
SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCAmelCase , param_name='size' , default_to_square=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCAmelCase , param_name='crop_size' , default_to_square=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_ = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_ = [convert_to_rgb(_lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ = [self.center_crop(image=_lowerCAmelCase , size=_lowerCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ = [self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ = [self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ = {'pixel_values': images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase ) | 31 |
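# A hedged sketch (added, not part of the class above) of the order `preprocess` applies
# (convert-to-RGB, resize, center-crop, rescale, normalize, channel-first), shown on a dummy
# array with the CLIP defaults wired into `__init__`:
#
#     import numpy as np
#     image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)          # HWC uint8
#     image = image.astype(np.float32) * (1 / 255)                              # rescale step
#     image = (image - np.array(OPENAI_CLIP_MEAN)) / np.array(OPENAI_CLIP_STD)  # normalize step
#     image = image.transpose(2, 0, 1)                                          # ChannelDimension.FIRST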
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        print(self.vertex )
        for i in self.vertex:
            print(i , ' -> ' , ' -> '.join([str(j ) for j in self.vertex[i]] ) )

    def add_edge(self, from_vertex: int, to_vertex: int):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
        # call the recursive helper function
        for i in range(len(self.vertex ) ):
            if not visited[i]:
                self.dfs_recursive(i , visited )

    def dfs_recursive(self, start_vertex: int, visited: list):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=' ' )
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3 | 31 | 1 |
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handle negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative' )
    # handle angles outside the allowed range
    if angle < 0 or angle > 3_60:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law') | 31 |
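    # Hedged worked check (added, not in the original): cos(60 degrees) = 0.5, so a 100-unit
    # beam through a polarizer at 60 degrees transmits 100 * 0.25 = 25 units.
    assert abs(malus_law(100.0, 60) - 25.0) < 1E-9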
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "funnel"
lowercase_ = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
    def __init__( self , vocab_size=30_522 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=768 , n_head=12 , d_head=64 , d_inner=3_072 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1E-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs )
    @property
    def num_hidden_layers( self ):
        return sum(self.block_sizes )
    @num_hidden_layers.setter
    def num_hidden_layers( self , _lowerCAmelCase : List[Any] ):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
    @property
    def num_blocks( self ):
        return len(self.block_sizes )
    @num_blocks.setter
    def num_blocks( self , _lowerCAmelCase : Union[str, Any] ):
        raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
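# A hedged usage sketch (added): the config class above is `FunnelConfig` in the upstream
# naming, and its derived properties follow directly from `block_sizes`, e.g.
#
#     config = FunnelConfig(block_sizes=[4, 4, 4])
#     config.num_hidden_layers  # 12, i.e. sum(block_sizes)
#     config.num_blocks         # 3,  i.e. len(block_sizes)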
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowerCamelCase__ : Dict = logging.getLogger(__name__)
def load_and_quantize_model( model : torch.nn.Module , bnb_quantization_config : BnbQuantizationConfig , weights_location : Union[str, os.PathLike] = None , device_map : Optional[Dict[str, Union[int, str, torch.device]]] = None , no_split_module_classes : Optional[List[str]] = None , max_memory : Optional[Dict[Union[int, str], Union[int, str]]] = None , offload_folder : Optional[Union[str, os.PathLike]] = None , offload_state_dict : bool = False , ):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
SCREAMING_SNAKE_CASE_ = []
# custom device map
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE_ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        bnb_quantization_config.keep_in_fpaa_modules = []
    keep_in_fpaa_modules = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(keep_in_fpaa_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
# convert param to the right dtype
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
SCREAMING_SNAKE_CASE_ = name.replace('.weight' , '' ).replace('.bias' , '' )
SCREAMING_SNAKE_CASE_ = getattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__UpperCAmelCase ):
param.to(__UpperCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
f"The model device type is {model_device.type}. However, cuda is needed for quantization."
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
else:
with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ):
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE_ = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
SCREAMING_SNAKE_CASE_ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = special_dtypes
SCREAMING_SNAKE_CASE_ = no_split_module_classes
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE_ = get_balanced_memory(
__UpperCAmelCase , low_zero=(device_map == 'balanced_low_0') , max_memory=__UpperCAmelCase , **__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ = max_memory
SCREAMING_SNAKE_CASE_ = infer_auto_device_map(__UpperCAmelCase , **__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
# check if don't have any quantized module on the cpu
SCREAMING_SNAKE_CASE_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
SCREAMING_SNAKE_CASE_ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
                    logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
SCREAMING_SNAKE_CASE_ = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE_ = []
current_key_name.append(__UpperCAmelCase )
if isinstance(__UpperCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE_ = '.'.join(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE_ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _replace_with_bnb_layers(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert( model ) -> List[str]:
# Create a copy of the model
with init_empty_weights():
SCREAMING_SNAKE_CASE_ = deepcopy(__UpperCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
SCREAMING_SNAKE_CASE_ = find_tied_parameters(__UpperCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE_ = sum(__UpperCAmelCase , [] )
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE_ = False
if hasattr(__UpperCAmelCase , 'base_model_prefix' ):
SCREAMING_SNAKE_CASE_ = not hasattr(__UpperCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE_ = list(model.named_children() )
SCREAMING_SNAKE_CASE_ = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE_ = set(__UpperCAmelCase ) - set(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = list(set(__UpperCAmelCase ) ) + list(__UpperCAmelCase )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE_ = ['.weight', '.bias']
SCREAMING_SNAKE_CASE_ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE_ = name.replace(__UpperCAmelCase , '' )
filtered_module_names.append(__UpperCAmelCase )
return filtered_module_names
def has_4bit_bnb_layers( model : nn.Module ) -> bool:
    # Check whether any `bnb.nn.Linear4bit` layers are present in the model.
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter : nn.Module ):
    return next(parameter.parameters() ).device
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ) -> Any:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__UpperCAmelCase , __UpperCAmelCase , 0 , dtype=__UpperCAmelCase , value=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = param_name
SCREAMING_SNAKE_CASE_ = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE_ = tensor_name.split('.' )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE_ = getattr(__UpperCAmelCase , __UpperCAmelCase )
if new_module is None:
raise ValueError(f"{module} has no attribute {split}." )
SCREAMING_SNAKE_CASE_ = new_module
SCREAMING_SNAKE_CASE_ = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE_ = False
offload_weight(module._parameters[tensor_name] , __UpperCAmelCase , __UpperCAmelCase , index=__UpperCAmelCase )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , __UpperCAmelCase , index=__UpperCAmelCase , )
else:
offload_weight(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , index=__UpperCAmelCase )
offload_weight(__UpperCAmelCase , param_name.replace('weight' , 'SCB' ) , __UpperCAmelCase , index=__UpperCAmelCase )
set_module_tensor_to_device(__UpperCAmelCase , __UpperCAmelCase , 'meta' , dtype=__UpperCAmelCase , value=torch.empty(*param.size() ) ) | 31 |
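# A hedged end-to-end sketch (added) of how the helpers above are typically driven; it mirrors
# the public `accelerate` entry point, and the checkpoint path and model class are placeholders:
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     with init_empty_weights():
#         empty_model = MyModel()  # hypothetical model class
#     quantized = load_and_quantize_model(
#         empty_model,
#         bnb_quantization_config=bnb_config,
#         weights_location='path/to/checkpoint',  # folder containing the saved weights
#         device_map='auto',
#     )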
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int):
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node):
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 1 |
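    # Hedged usage example (added, not in the original): sum the three-node tree 2 <- 1 -> 3.
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    assert next(iter(BinaryTreeNodeSum(root))) == 6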
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_longformer': [
        'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'LongformerConfig',
        'LongformerOnnxConfig',
    ],
    'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_longformer_fast'] = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longformer'] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_longformer'] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 1 |
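    # Hedged usage example (added, not in the original): the two-ended recursive scan finds 5
    # at index 5 and returns -1 for a key that is absent.
    assert search([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 5) == 5
    assert search([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 42) == -1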
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]):
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
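if __name__ == "__main__":
    # Hedged usage example (added, not in the original file): with 8-connectivity the two set
    # cells below are too far apart to join, so two islands are counted.
    grid = [
        [1, 0, 0],
        [0, 0, 0],
        [0, 0, 1],
    ]
    print(Matrix(3, 3, grid).count_islands())  # 2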
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ : List[str] = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ : Optional[Any] = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
lowerCamelCase__ : List[Any] = '▁'
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "token_type_ids"]
lowercase_ = FNetTokenizer
def __init__( self : List[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[Any]="<unk>" , _lowerCAmelCase : Optional[Any]="[SEP]" , _lowerCAmelCase : Optional[Any]="<pad>" , _lowerCAmelCase : Optional[int]="[CLS]" , _lowerCAmelCase : Optional[Any]="[MASK]" , **_lowerCAmelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE_ = (
AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase , normalized=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else mask_token
)
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,) | 31 | 1 |
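# A hedged sketch (added) of the special-token layout the two methods above produce, in the
# standard single-sequence / sequence-pair format:
#
#     single sequence:   [CLS] A [SEP]            token_type_ids: 0 0 ... 0
#     pair of sequences: [CLS] A [SEP] B [SEP]    token_type_ids: 0 ... 0, then 1 ... 1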
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = StableUnCLIPPipeline
lowercase_ = TEXT_TO_IMAGE_PARAMS
lowercase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowercase_ = False
def lowerCAmelCase_ ( self : str ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCAmelCase , projection_dim=_lowerCAmelCase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowerCAmelCase , num_layers=1 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_000 , clip_sample=_lowerCAmelCase , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = StableUnCLIPImageNormalizer(embedding_dim=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowerCAmelCase , layers_per_block=1 , upcast_attention=_lowerCAmelCase , use_linear_projection=_lowerCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = AutoencoderKL()
SCREAMING_SNAKE_CASE_ = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any]=0 ):
if str(_lowerCAmelCase ).startswith('mps' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(_lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=_lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=_lowerCAmelCase )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
SCREAMING_SNAKE_CASE_ = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='cpu' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE_ = pipe('anime turtle' , generator=_lowerCAmelCase , output_type='np' )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
def lowerCAmelCase_ ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_ = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9 | 31 |
from __future__ import annotations
from collections.abc import Generator
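# Incremental sieve of Eratosthenes: each prime schedules its next composite in factor_map, so primes can be generated without a fixed upper bound.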
def UpperCAmelCase_ ( ) -> Generator[int, None, None]:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 2
while True:
        SCREAMING_SNAKE_CASE_ = factor_map.pop(prime , None )
if factor:
SCREAMING_SNAKE_CASE_ = factor + prime
while x in factor_map:
x += factor
SCREAMING_SNAKE_CASE_ = factor
else:
SCREAMING_SNAKE_CASE_ = prime
yield prime
prime += 1
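# Smallest n for which 2 * p(n) * n exceeds the limit; this matches the prime square remainder bound of Project Euler 123 (assuming that is the intended problem).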
def UpperCAmelCase_ ( __UpperCAmelCase : float = 1E10 ) -> int:
SCREAMING_SNAKE_CASE_ = sieve()
SCREAMING_SNAKE_CASE_ = 1
while True:
SCREAMING_SNAKE_CASE_ = next(__UpperCAmelCase )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime as the remainder will be 2.
next(__UpperCAmelCase )
n += 2
if __name__ == "__main__":
print(solution()) | 31 | 1 |
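# Towers of Hanoi: recursively move the top height-1 disks to the spare peg, move the largest disk, then restack the rest on top.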
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] ) -> str:
if height >= 1:
move_tower(height - 1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
move_disk(__UpperCAmelCase , __UpperCAmelCase )
move_tower(height - 1 , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] ) -> Optional[Any]:
print('moving disk from' , __UpperCAmelCase , 'to' , __UpperCAmelCase )
def UpperCAmelCase_ ( ) -> int:
SCREAMING_SNAKE_CASE_ = int(input('Height of hanoi: ' ).strip() )
move_tower(__UpperCAmelCase , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main() | 31 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
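# Synthetic linear-regression dataset (y = a * x + b plus noise) and two minimal models used by the Accelerate tests.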
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=64 , _lowerCAmelCase : List[str]=None ):
SCREAMING_SNAKE_CASE_ = np.random.default_rng(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = length
SCREAMING_SNAKE_CASE_ = rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Optional[int] ):
return self.length
def __getitem__( self : str , _lowerCAmelCase : Union[str, Any] ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : str=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_ = True
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any]=None ):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE_ = False
return x * self.a[0] + self.b[0]
class lowerCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Optional[Any]=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_ = True
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[int]=None ):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE_ = False
return x * self.a + self.b
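# Builds train/eval dataloaders over a small MRPC CSV sample: tokenize sentence pairs, map string labels to ids, and pad per batch (fixed-length padding on TPU).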
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : int = 16 ) -> Union[str, Any]:
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE_ = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
SCREAMING_SNAKE_CASE_ = load_dataset('csv' , data_files=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = datasets['train'].unique('label' )
SCREAMING_SNAKE_CASE_ = {v: i for i, v in enumerate(__UpperCAmelCase )}
def tokenize_function(__UpperCAmelCase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' )
if "label" in examples:
SCREAMING_SNAKE_CASE_ = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_ = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(__UpperCAmelCase : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCAmelCase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(__UpperCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['train'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=2 )
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['validation'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader | 31 | 1 |
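# Greedy 2-approximation for minimum vertex cover: repeatedly take an uncovered edge and add both of its endpoints.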
def UpperCAmelCase_ ( __UpperCAmelCase : dict ) -> set:
SCREAMING_SNAKE_CASE_ = set()
# edges = list of graph's edges
SCREAMING_SNAKE_CASE_ = get_edges(__UpperCAmelCase )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
return chosen_vertices
def UpperCAmelCase_ ( __UpperCAmelCase : dict ) -> set:
SCREAMING_SNAKE_CASE_ = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}") | 31 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ):
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) | 31 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
try:
with open(__UpperCAmelCase , 'rb' ) as flax_state_f:
SCREAMING_SNAKE_CASE_ = from_bytes(__UpperCAmelCase , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(__UpperCAmelCase ) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. " )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
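# Maps Flax parameters onto a PyTorch state dict: 4D conv kernels are transposed HWIO -> OIHW, dense kernels are transposed, and 'scale' becomes 'weight'; leftover keys are reported as missing/unexpected.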
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] ) -> Dict:
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    SCREAMING_SNAKE_CASE_ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , __UpperCAmelCase ) ).values()
if any(__UpperCAmelCase ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
SCREAMING_SNAKE_CASE_ = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = ''
SCREAMING_SNAKE_CASE_ = flatten_dict(__UpperCAmelCase , sep='.' )
SCREAMING_SNAKE_CASE_ = pt_model.state_dict()
# keep track of unexpected & missing keys
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
SCREAMING_SNAKE_CASE_ = flax_key_tuple.split('.' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
SCREAMING_SNAKE_CASE_ = flax_key_tuple_array[:-1] + ['weight']
SCREAMING_SNAKE_CASE_ = jnp.transpose(__UpperCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
SCREAMING_SNAKE_CASE_ = flax_key_tuple_array[:-1] + ['weight']
SCREAMING_SNAKE_CASE_ = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
SCREAMING_SNAKE_CASE_ = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = (
flax_key_tuple_string.replace('_0' , '.0' )
.replace('_1' , '.1' )
.replace('_2' , '.2' )
.replace('_3' , '.3' )
.replace('_4' , '.4' )
.replace('_5' , '.5' )
.replace('_6' , '.6' )
.replace('_7' , '.7' )
.replace('_8' , '.8' )
.replace('_9' , '.9' )
)
SCREAMING_SNAKE_CASE_ = '.'.join(__UpperCAmelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
SCREAMING_SNAKE_CASE_ = np.asarray(__UpperCAmelCase ) if not isinstance(__UpperCAmelCase , np.ndarray ) else flax_tensor
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__UpperCAmelCase )
# remove from missing keys
missing_keys.remove(__UpperCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__UpperCAmelCase )
pt_model.load_state_dict(__UpperCAmelCase )
# re-transform missing_keys to list
SCREAMING_SNAKE_CASE_ = list(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(__UpperCAmelCase ) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
' use it for predictions and inference.' )
return pt_model | 31 |
def UpperCAmelCase_ ( ) -> list[list[int]]:
return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
lowerCamelCase__ : List[Any] = generate_large_matrix()
lowerCamelCase__ : List[Any] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> None:
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
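# Binary search for the index of the first negative value in a row sorted in descending order.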
def UpperCAmelCase_ ( __UpperCAmelCase : list[int] ) -> int:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
SCREAMING_SNAKE_CASE_ = (left + right) // 2
SCREAMING_SNAKE_CASE_ = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
SCREAMING_SNAKE_CASE_ = mid + 1
else:
SCREAMING_SNAKE_CASE_ = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__UpperCAmelCase )
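# Counts negatives by shrinking the per-row search bound; rows and columns are sorted descending, so each row's boundary never grows.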
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> int:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(grid[0] )
for i in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ = find_negative_index(grid[i][:bound] )
total += bound
return (len(__UpperCAmelCase ) * len(grid[0] )) - total
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> int:
return len([number for row in grid for number in row if number < 0] )
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> int:
SCREAMING_SNAKE_CASE_ = 0
for row in grid:
for i, number in enumerate(__UpperCAmelCase ):
if number < 0:
total += len(__UpperCAmelCase ) - i
break
return total
def UpperCAmelCase_ ( ) -> None:
from timeit import timeit
print('Running benchmarks' )
SCREAMING_SNAKE_CASE_ = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
SCREAMING_SNAKE_CASE_ = timeit(f"{func}(grid=grid)" , setup=__UpperCAmelCase , number=5_00 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 31 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
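# Unit tests for the Flax logits warpers/processors (temperature, top-k, top-p, min-length, forced BOS/EOS), run standalone, composed in a processor list, and under jax.jit.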
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = jnp.ones((batch_size, length) ) / length
return scores
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(batch_size=2 , length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
        SCREAMING_SNAKE_CASE_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 2nd batch
        SCREAMING_SNAKE_CASE_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 2nd batch
# compute softmax
SCREAMING_SNAKE_CASE_ = jax.nn.softmax(_lowerCAmelCase , axis=-1 )
SCREAMING_SNAKE_CASE_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
SCREAMING_SNAKE_CASE_ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
SCREAMING_SNAKE_CASE_ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = 2
# create ramp distribution
SCREAMING_SNAKE_CASE_ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
SCREAMING_SNAKE_CASE_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_ = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
SCREAMING_SNAKE_CASE_ = 5
SCREAMING_SNAKE_CASE_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
SCREAMING_SNAKE_CASE_ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, length) ).copy()
SCREAMING_SNAKE_CASE_ = top_k_warp_safety_check(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
SCREAMING_SNAKE_CASE_ = FlaxTopPLogitsWarper(0.8 )
SCREAMING_SNAKE_CASE_ = np.exp(top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
SCREAMING_SNAKE_CASE_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE_ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
SCREAMING_SNAKE_CASE_ = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_ = ids_tensor((batch_size, 20) , vocab_size=20 )
SCREAMING_SNAKE_CASE_ = 5
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 15
SCREAMING_SNAKE_CASE_ = min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_ = ids_tensor((batch_size, 1) , vocab_size=20 )
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = 20
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 5
SCREAMING_SNAKE_CASE_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_ = ids_tensor((batch_size, 4) , vocab_size=20 )
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = 15
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ = ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = input_ids.copy()
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 10
# no processor list
SCREAMING_SNAKE_CASE_ = temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# with processor list
SCREAMING_SNAKE_CASE_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE_ = processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = 15
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_ = ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = input_ids.copy()
SCREAMING_SNAKE_CASE_ = self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
SCREAMING_SNAKE_CASE_ = FlaxTopKLogitsWarper(3 )
SCREAMING_SNAKE_CASE_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
SCREAMING_SNAKE_CASE_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 10
# no processor list
def run_no_processor_list(_lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
SCREAMING_SNAKE_CASE_ = processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
SCREAMING_SNAKE_CASE_ = jax.jit(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = jax.jit(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = jitted_run_no_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = jitted_run_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) | 31 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
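# Lazy import structure for the MMBT multimodal model (config plus the PyTorch classes when torch is available).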
lowerCamelCase__ : Optional[int] = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Any = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
lowerCamelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 31 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
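# Model tester and shared-API checks for the Flax DistilBERT classes, plus a slow integration test against the pretrained checkpoint.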
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int]=13 , _lowerCAmelCase : Any=7 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[Any]=99 , _lowerCAmelCase : Union[str, Any]=32 , _lowerCAmelCase : str=5 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : Any=37 , _lowerCAmelCase : int="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Optional[Any]=0.1 , _lowerCAmelCase : Any=512 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : Union[str, Any]=2 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : List[str]=4 , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_attention_mask
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_choices
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_lowerCAmelCase , )
return config, input_ids, attention_mask
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = FlaxDistilBertModelTester(self )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class_name.from_pretrained('distilbert-base-uncased' )
SCREAMING_SNAKE_CASE_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
SCREAMING_SNAKE_CASE_ = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
SCREAMING_SNAKE_CASE_ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ = (1, 11, 768)
self.assertEqual(output.shape , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1E-4 ) ) | 31 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
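# Shared mixin for UNet block tests: builds dummy inputs sized per block type and checks forward output slices and a training backward pass.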
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return self.get_dummy_input()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : str=False , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Dict=False , ):
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 32
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = torch.device(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (batch_size, num_channels) + sizes
SCREAMING_SNAKE_CASE_ = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {'hidden_states': hidden_states}
if include_temb:
SCREAMING_SNAKE_CASE_ = 128
SCREAMING_SNAKE_CASE_ = randn_tensor((batch_size, temb_channels) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
if include_res_hidden_states_tuple:
SCREAMING_SNAKE_CASE_ = torch.manual_seed(1 )
SCREAMING_SNAKE_CASE_ = (randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase ),)
if include_encoder_hidden_states:
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, 32, 32) ).to(_lowerCAmelCase )
if include_skip_sample:
SCREAMING_SNAKE_CASE_ = randn_tensor(((batch_size, 3) + sizes) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
return dummy_input
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
SCREAMING_SNAKE_CASE_ = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = self.block_class(**_lowerCAmelCase )
unet_block.to(_lowerCAmelCase )
unet_block.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = unet_block(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = output[0]
self.assertEqual(output.shape , self.output_shape )
SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:]
SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
assert torch_all_close(output_slice.flatten() , _lowerCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = self.block_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = output[0]
SCREAMING_SNAKE_CASE_ = torch.device(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = randn_tensor(output.shape , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(_lowerCAmelCase , _lowerCAmelCase )
loss.backward() | 31 | 1 |
from __future__ import annotations
import requests
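# Fetches top Hacker News stories via the public Firebase API and renders them as a Markdown bullet list.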
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> dict:
SCREAMING_SNAKE_CASE_ = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
return requests.get(__UpperCAmelCase ).json()
def UpperCAmelCase_ ( __UpperCAmelCase : int = 10 ) -> list[dict]:
SCREAMING_SNAKE_CASE_ = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
SCREAMING_SNAKE_CASE_ = requests.get(__UpperCAmelCase ).json()[:max_stories]
return [get_hackernews_story(__UpperCAmelCase ) for story_id in story_ids]
def UpperCAmelCase_ ( __UpperCAmelCase : int = 10 ) -> str:
SCREAMING_SNAKE_CASE_ = hackernews_top_stories(__UpperCAmelCase )
return "\n".join('* [{title}]({url})'.format(**__UpperCAmelCase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown()) | 31 |
import operator as op
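# Stack-based evaluation of a space-separated postfix (RPN) expression, printing each push/pop step as a table.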
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Any:
SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = lambda x , y : int(x / y ) # noqa: E731 integer division operation
SCREAMING_SNAKE_CASE_ = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' )
print('-' * (30 + len(__UpperCAmelCase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
            stack.append(x ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(__UpperCAmelCase ) , sep=' | ' )
else:
SCREAMING_SNAKE_CASE_ = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(__UpperCAmelCase ) , sep=' | ' )
SCREAMING_SNAKE_CASE_ = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(__UpperCAmelCase ) , sep=' | ' )
            stack.append(
                str(opr[x](int(a ) , int(b ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(__UpperCAmelCase ) , sep=' | ' , )
return int(stack[0] )
if __name__ == "__main__":
lowerCamelCase__ : Tuple = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix)) | 31 | 1 |
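# First triangular number with more than 500 divisors (Project Euler 12), counting divisors from the prime factorization.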
def UpperCAmelCase_ ( ) -> Tuple:
for n in range(1 , 1_00_00_00 ):
yield n * (n + 1) // 2
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 2
while i * i <= n:
SCREAMING_SNAKE_CASE_ = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def UpperCAmelCase_ ( ) -> Union[str, Any]:
return next(i for i in triangle_number_generator() if count_divisors(__UpperCAmelCase ) > 5_00 )
if __name__ == "__main__":
print(solution()) | 31 |
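# Sylvester's sequence: a(1) = 2 and a(n) = a(n-1)**2 - a(n-1) + 1, computed recursively.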
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> int:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), f"The input value of [n={number}] is not an integer"
if number == 1:
return 2
elif number < 1:
SCREAMING_SNAKE_CASE_ = f"The input value of [n={number}] has to be > 0"
raise ValueError(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ = sylvester(number - 1 )
SCREAMING_SNAKE_CASE_ = num - 1
SCREAMING_SNAKE_CASE_ = num
return lower * upper + 1
if __name__ == "__main__":
print(f'''The 8th number in Sylvester's sequence: {sylvester(8)}''')
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
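# Slow integration tests comparing XLM-R base and large hidden states against reference values (see the commented fairseq snippets).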
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
SCREAMING_SNAKE_CASE_ = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
SCREAMING_SNAKE_CASE_ = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )['last_hidden_state'].detach()
self.assertEqual(output.shape , _lowerCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _lowerCAmelCase , atol=1E-3 ) )
@slow
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
SCREAMING_SNAKE_CASE_ = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
SCREAMING_SNAKE_CASE_ = torch.Size((1, 12, 1_024) ) # batch_size, sequence_length, embedding_vector_dim
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )['last_hidden_state'].detach()
self.assertEqual(output.shape , _lowerCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _lowerCAmelCase , atol=1E-3 ) ) | 31 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
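# Lazy import structure for MT5 across PyTorch, TensorFlow, and Flax; the T5 tokenizers are reused as MT5's tokenizers.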
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCamelCase__ : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCamelCase__ : Union[str, Any] = TaTokenizerFast
lowerCamelCase__ : Dict = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCamelCase__ : int = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
) | 31 | 1 |
import colorsys
from PIL import Image # type: ignore
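# Escape-time rendering of the Mandelbrot set; pixels are colored black-and-white or by an HSV hue proportional to escape speed.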
def UpperCAmelCase_ ( __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : int ) -> float:
SCREAMING_SNAKE_CASE_ = x
SCREAMING_SNAKE_CASE_ = y
for step in range(__UpperCAmelCase ): # noqa: B007
SCREAMING_SNAKE_CASE_ = a * a - b * b + x
SCREAMING_SNAKE_CASE_ = 2 * a * b + y
SCREAMING_SNAKE_CASE_ = a_new
        # divergence happens for all complex numbers with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def UpperCAmelCase_ ( __UpperCAmelCase : float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (2_55, 2_55, 2_55)
def UpperCAmelCase_ ( __UpperCAmelCase : float ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_55 ) for i in colorsys.hsv_to_rgb(__UpperCAmelCase , 1 , 1 ) )
def UpperCAmelCase_ ( __UpperCAmelCase : int = 8_00 , __UpperCAmelCase : int = 6_00 , __UpperCAmelCase : float = -0.6 , __UpperCAmelCase : float = 0 , __UpperCAmelCase : float = 3.2 , __UpperCAmelCase : int = 50 , __UpperCAmelCase : bool = True , ) -> Image.Image:
SCREAMING_SNAKE_CASE_ = Image.new('RGB' , (image_width, image_height) )
SCREAMING_SNAKE_CASE_ = img.load()
# loop through the image-coordinates
for image_x in range(__UpperCAmelCase ):
for image_y in range(__UpperCAmelCase ):
# determine the figure-coordinates based on the image-coordinates
SCREAMING_SNAKE_CASE_ = figure_width / image_width * image_height
SCREAMING_SNAKE_CASE_ = figure_center_x + (image_x / image_width - 0.5) * figure_width
SCREAMING_SNAKE_CASE_ = figure_center_y + (image_y / image_height - 0.5) * figure_height
SCREAMING_SNAKE_CASE_ = get_distance(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
SCREAMING_SNAKE_CASE_ = get_color_coded_rgb(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ = get_black_and_white_rgb(__UpperCAmelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCamelCase__ : Optional[int] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show() | 31 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
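# Runs small scripts in subprocesses so TRANSFORMERS_OFFLINE can be set before transformers is imported, verifying cached models load without network access.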
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch
def lowerCAmelCase_ ( self : int ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : Tuple ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import AutoModel\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() ) | 31 | 1 |
import math
import flax.linen as nn
import jax.numpy as jnp
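# Transformer-style sinusoidal timestep embeddings for Flax diffusion models, plus a small time-embedding MLP module.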
def UpperCAmelCase_ ( __UpperCAmelCase : jnp.ndarray , __UpperCAmelCase : int , __UpperCAmelCase : float = 1 , __UpperCAmelCase : float = 1 , __UpperCAmelCase : float = 1.0E4 , __UpperCAmelCase : bool = False , __UpperCAmelCase : float = 1.0 , ) -> jnp.ndarray:
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
SCREAMING_SNAKE_CASE_ = float(embedding_dim // 2 )
SCREAMING_SNAKE_CASE_ = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
SCREAMING_SNAKE_CASE_ = min_timescale * jnp.exp(jnp.arange(__UpperCAmelCase , dtype=jnp.floataa ) * -log_timescale_increment )
SCREAMING_SNAKE_CASE_ = jnp.expand_dims(__UpperCAmelCase , 1 ) * jnp.expand_dims(__UpperCAmelCase , 0 )
# scale embeddings
SCREAMING_SNAKE_CASE_ = scale * emb
if flip_sin_to_cos:
SCREAMING_SNAKE_CASE_ = jnp.concatenate([jnp.cos(__UpperCAmelCase ), jnp.sin(__UpperCAmelCase )] , axis=1 )
else:
SCREAMING_SNAKE_CASE_ = jnp.concatenate([jnp.sin(__UpperCAmelCase ), jnp.cos(__UpperCAmelCase )] , axis=1 )
SCREAMING_SNAKE_CASE_ = jnp.reshape(__UpperCAmelCase , [jnp.shape(__UpperCAmelCase )[0], embedding_dim] )
return signal
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
lowercase_ = 32
lowercase_ = jnp.floataa
@nn.compact
def __call__( self : int , _lowerCAmelCase : Any ):
SCREAMING_SNAKE_CASE_ = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = nn.silu(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(_lowerCAmelCase )
return temb
class lowerCamelCase_ ( nn.Module ):
'''simple docstring'''
lowercase_ = 32
lowercase_ = False
lowercase_ = 1
@nn.compact
def __call__( self : Union[str, Any] , _lowerCAmelCase : int ):
return get_sinusoidal_embeddings(
_lowerCAmelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift ) | 31 |
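# A short usage sketch for the sinusoidal helper above (named
# get_sinusoidal_embeddings at its call site): embed a batch of three integer
# timesteps into (batch, dim) rows; the dim value is an illustrative even number.
import jax.numpy as jnp

timesteps = jnp.array([0, 10, 100])
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
assert emb.shape == (3, 32)  # half sin features, half cos features per row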
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "M-CLIP"
def __init__( self : Tuple , _lowerCAmelCase : List[str]=1_024 , _lowerCAmelCase : str=768 , **_lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = transformerDimSize
SCREAMING_SNAKE_CASE_ = imageDimSize
super().__init__(**_lowerCAmelCase )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = MCLIPConfig
def __init__( self : Dict , _lowerCAmelCase : Union[str, Any] , *_lowerCAmelCase : str , **_lowerCAmelCase : str ):
super().__init__(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = XLMRobertaModel(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.transformer(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(_lowerCAmelCase ), embs | 31 | 1 |
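# A standalone sketch of the masked mean pooling in the forward pass above:
# zero out padded positions, sum over the sequence, then divide by the count
# of real tokens. Shapes and values are illustrative.
import torch

embs = torch.randn(2, 5, 8)                       # (batch, seq, dim)
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1]])  # (batch, seq)
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
assert pooled.shape == (2, 8)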
from __future__ import annotations
from collections.abc import Generator
def UpperCAmelCase_ ( ) -> Generator[int, None, None]:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 2
while True:
SCREAMING_SNAKE_CASE_ = factor_map.pop(__UpperCAmelCase , None )
if factor:
SCREAMING_SNAKE_CASE_ = factor + prime
while x in factor_map:
x += factor
SCREAMING_SNAKE_CASE_ = factor
else:
SCREAMING_SNAKE_CASE_ = prime
yield prime
prime += 1
def UpperCAmelCase_ ( __UpperCAmelCase : float = 1E10 ) -> int:
SCREAMING_SNAKE_CASE_ = sieve()
SCREAMING_SNAKE_CASE_ = 1
while True:
SCREAMING_SNAKE_CASE_ = next(__UpperCAmelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the remainder will be 2.
next(__UpperCAmelCase )
n += 2
if __name__ == "__main__":
print(solution()) | 31 |
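# A sanity check for the incremental sieve above, assuming the generator is
# bound to the name `sieve` as the solver calls it: it should yield the primes
# in order with no fixed upper bound.
primes = sieve()
assert [next(primes) for _ in range(8)] == [2, 3, 5, 7, 11, 13, 17, 19]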
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase )
return image
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def lowerCAmelCase_ ( self : Tuple ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def lowerCAmelCase_ ( self : Optional[int] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(_lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : List[Any] ):
def extract(*_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str ):
SCREAMING_SNAKE_CASE_ = torch.ones([0] )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int ):
self.pixel_values.to(_lowerCAmelCase )
return self
return Out()
return extract
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
# put models in fp16
SCREAMING_SNAKE_CASE_ = unet.half()
SCREAMING_SNAKE_CASE_ = vae.half()
SCREAMING_SNAKE_CASE_ = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE_ = init_image.resize((760, 504) )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
SCREAMING_SNAKE_CASE_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE_ = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2 | 31 | 1 |
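# A minimal sketch of the determinism these pipeline tests rely on: re-seeding
# a CPU torch.Generator reproduces the exact same sample stream, so two runs
# with the same seed can be compared slice-for-slice.
import torch

first = torch.randn(4, generator=torch.Generator(device='cpu').manual_seed(0))
second = torch.randn(4, generator=torch.Generator(device='cpu').manual_seed(0))
assert torch.equal(first, second)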
import cva
import numpy as np
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : float , _lowerCAmelCase : int ):
if k in (0.04, 0.06):
SCREAMING_SNAKE_CASE_ = k
SCREAMING_SNAKE_CASE_ = window_size
else:
raise ValueError('invalid k value' )
def __str__( self : Dict ):
return str(self.k )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = cva.imread(_lowerCAmelCase , 0 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = img.shape
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = img.copy()
SCREAMING_SNAKE_CASE_ = cva.cvtColor(_lowerCAmelCase , cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = np.gradient(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = dx**2
SCREAMING_SNAKE_CASE_ = dy**2
SCREAMING_SNAKE_CASE_ = dx * dy
SCREAMING_SNAKE_CASE_ = 0.04
SCREAMING_SNAKE_CASE_ = self.window_size // 2
for y in range(_lowerCAmelCase , h - offset ):
for x in range(_lowerCAmelCase , w - offset ):
SCREAMING_SNAKE_CASE_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE_ = wxx + wyy
SCREAMING_SNAKE_CASE_ = det - k * (trace**2)
# threshold on the corner response; tune for the image at hand
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = HarrisCorner(0.04, 3)
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img) | 31 |
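# A vectorized sketch of the per-pixel Harris response computed in the nested
# loops above, assuming the windowed gradient sums wxx, wyy, wxy are already
# available as arrays; values here are random placeholders.
import numpy as np

wxx, wyy, wxy = np.random.rand(3, 8, 8)
k = 0.04
det = wxx * wyy - wxy**2
trace = wxx + wyy
response = det - k * trace**2
corners = np.argwhere(response > 0.5)  # same threshold as the loop version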
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "longformer"
def __init__( self : Union[str, Any] , _lowerCAmelCase : Union[List[int], int] = 512 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 0 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 30_522 , _lowerCAmelCase : int = 768 , _lowerCAmelCase : int = 12 , _lowerCAmelCase : int = 12 , _lowerCAmelCase : int = 3_072 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 512 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : float = 1E-12 , _lowerCAmelCase : bool = False , **_lowerCAmelCase : Union[str, Any] , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = attention_window
SCREAMING_SNAKE_CASE_ = sep_token_id
SCREAMING_SNAKE_CASE_ = bos_token_id
SCREAMING_SNAKE_CASE_ = eos_token_id
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = onnx_export
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : "PretrainedConfig" , _lowerCAmelCase : str = "default" , _lowerCAmelCase : "List[PatchingSpec]" = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = True
@property
def lowerCAmelCase_ ( self : Any ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE_ = {0: 'batch'}
return outputs
@property
def lowerCAmelCase_ ( self : str ):
return 1E-4
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : "PreTrainedTokenizerBase" , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE_ = torch.zeros_like(inputs['input_ids'] )
# make every second token global
SCREAMING_SNAKE_CASE_ = 1
return inputs | 31 | 1 |
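# A small sketch of the "every second token is global" mask the dummy-input
# generator builds above; the (batch, seq) shape is illustrative.
import torch

input_ids = torch.ones(2, 8, dtype=torch.long)
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1  # mark every second position as global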
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCamelCase__ : Optional[Any] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def UpperCAmelCase_ ( __UpperCAmelCase : List[str]=None ) -> Optional[int]:
if subparsers is not None:
SCREAMING_SNAKE_CASE_ = subparsers.add_parser('tpu-config' , description=_description )
else:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
SCREAMING_SNAKE_CASE_ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=__UpperCAmelCase , default=__UpperCAmelCase , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=__UpperCAmelCase , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=__UpperCAmelCase , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
SCREAMING_SNAKE_CASE_ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=__UpperCAmelCase , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=__UpperCAmelCase )
return parser
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Dict:
SCREAMING_SNAKE_CASE_ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
SCREAMING_SNAKE_CASE_ = defaults.command_file
if not args.command and defaults.commands is not None:
SCREAMING_SNAKE_CASE_ = defaults.commands
if not args.tpu_name:
SCREAMING_SNAKE_CASE_ = defaults.tpu_name
if not args.tpu_zone:
SCREAMING_SNAKE_CASE_ = defaults.tpu_zone
if args.accelerate_version == "dev":
SCREAMING_SNAKE_CASE_ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
SCREAMING_SNAKE_CASE_ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = f"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
SCREAMING_SNAKE_CASE_ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
SCREAMING_SNAKE_CASE_ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [f"pip install {args.accelerate_version}"]
new_cmd += args.command
SCREAMING_SNAKE_CASE_ = '; '.join(__UpperCAmelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
SCREAMING_SNAKE_CASE_ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f"Running {' '.join(__UpperCAmelCase )}" )
return
subprocess.run(__UpperCAmelCase )
print('Successfully setup pod.' )
def UpperCAmelCase_ ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tpu_command_parser()
SCREAMING_SNAKE_CASE_ = parser.parse_args()
tpu_command_launcher(__UpperCAmelCase ) | 31 |
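# An illustration of the gcloud invocation assembled above, with placeholder
# TPU name, zone, and startup commands; real values come from the CLI args or
# the accelerate config file.
startup = '; '.join(['cd /usr/share', 'pip install accelerate -U', 'python train.py'])
cmd = ['gcloud', 'compute', 'tpus', 'tpu-vm', 'ssh', 'my-tpu',
       '--zone', 'us-central1-a', '--command', startup, '--worker', 'all']
print(' '.join(cmd))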
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : int ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) | 31 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "swinv2"
lowercase_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Dict , _lowerCAmelCase : Optional[Any]=224 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Tuple=96 , _lowerCAmelCase : Dict=[2, 2, 6, 2] , _lowerCAmelCase : Optional[Any]=[3, 6, 12, 24] , _lowerCAmelCase : str=7 , _lowerCAmelCase : List[Any]=4.0 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : str=False , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : List[Any]=1E-5 , _lowerCAmelCase : str=32 , **_lowerCAmelCase : List[Any] , ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = embed_dim
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = num_heads
SCREAMING_SNAKE_CASE_ = window_size
SCREAMING_SNAKE_CASE_ = mlp_ratio
SCREAMING_SNAKE_CASE_ = qkv_bias
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = use_absolute_embeddings
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE_ = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
SCREAMING_SNAKE_CASE_ = (0, 0, 0, 0) | 31 | 1 |
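# The final-stage channel count set above doubles embed_dim once per stage
# transition; checked here with the constructor defaults.
embed_dim, depths = 96, [2, 2, 6, 2]
assert int(embed_dim * 2 ** (len(depths) - 1)) == 768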
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowerCamelCase__ : int = datasets.utils.logging.get_logger(__name__)
lowerCamelCase__ : str = ['names', 'prefix']
lowerCamelCase__ : List[Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
lowerCamelCase__ : Optional[int] = ['encoding_errors', 'on_bad_lines']
lowerCamelCase__ : str = ['date_format']
@dataclass
class lowerCamelCase_ ( datasets.BuilderConfig ):
'''simple docstring'''
lowercase_ = ","
lowercase_ = None
lowercase_ = "infer"
lowercase_ = None
lowercase_ = None
lowercase_ = None
lowercase_ = None
lowercase_ = None
lowercase_ = True
lowercase_ = None
lowercase_ = None
lowercase_ = None
lowercase_ = None
lowercase_ = False
lowercase_ = None
lowercase_ = None
lowercase_ = None
lowercase_ = True
lowercase_ = True
lowercase_ = False
lowercase_ = True
lowercase_ = None
lowercase_ = "."
lowercase_ = None
lowercase_ = '"'
lowercase_ = 0
lowercase_ = None
lowercase_ = None
lowercase_ = None
lowercase_ = None
lowercase_ = True
lowercase_ = True
lowercase_ = 0
lowercase_ = True
lowercase_ = False
lowercase_ = None
lowercase_ = 10_000
lowercase_ = None
lowercase_ = "strict"
lowercase_ = "error"
lowercase_ = None
def lowerCAmelCase_ ( self : Any ):
if self.delimiter is not None:
SCREAMING_SNAKE_CASE_ = self.delimiter
if self.column_names is not None:
SCREAMING_SNAKE_CASE_ = self.column_names
@property
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , _lowerCAmelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class lowerCamelCase_ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowercase_ = CsvConfig
def lowerCAmelCase_ ( self : Tuple ):
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Optional[int] ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCAmelCase , (str, list, tuple) ):
SCREAMING_SNAKE_CASE_ = data_files
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = [files]
SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
SCREAMING_SNAKE_CASE_ = []
for split_name, files in data_files.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = [files]
SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCAmelCase , gen_kwargs={'files': files} ) )
return splits
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : pa.Table ):
if self.config.features is not None:
SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema
if all(not require_storage_cast(_lowerCAmelCase ) for feature in self.config.features.values() ):
# cheaper cast
SCREAMING_SNAKE_CASE_ = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=_lowerCAmelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE_ = table_cast(_lowerCAmelCase , _lowerCAmelCase )
return pa_table
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
SCREAMING_SNAKE_CASE_ = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(_lowerCAmelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ):
SCREAMING_SNAKE_CASE_ = pd.read_csv(_lowerCAmelCase , iterator=_lowerCAmelCase , dtype=_lowerCAmelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = pa.Table.from_pandas(_lowerCAmelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCAmelCase )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(_lowerCAmelCase )}: {e}" )
raise | 31 |
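# A minimal sketch of the chunked CSV-to-Arrow loop in _generate_tables above,
# assuming a local file path; each pandas chunk becomes one Arrow table.
import pandas as pd
import pyarrow as pa

def iter_arrow_tables(path: str, chunksize: int = 10_000):
    for df in pd.read_csv(path, iterator=True, chunksize=chunksize):
        yield pa.Table.from_pandas(df)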
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
lowerCamelCase__ : Dict = random.Random()
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Tuple=1.0 , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Dict=None ) -> Tuple:
if rng is None:
SCREAMING_SNAKE_CASE_ = global_rng
SCREAMING_SNAKE_CASE_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]=7 , _lowerCAmelCase : Union[str, Any]=400 , _lowerCAmelCase : Tuple=2_000 , _lowerCAmelCase : str=1 , _lowerCAmelCase : int=0.0 , _lowerCAmelCase : Optional[Any]=16_000 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Any=80 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : List[str]=64 , _lowerCAmelCase : List[Any]="hann_window" , _lowerCAmelCase : Any=80 , _lowerCAmelCase : List[Any]=7_600 , _lowerCAmelCase : List[Any]=1E-10 , _lowerCAmelCase : Optional[Any]=True , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = min_seq_length
SCREAMING_SNAKE_CASE_ = max_seq_length
SCREAMING_SNAKE_CASE_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE_ = feature_size
SCREAMING_SNAKE_CASE_ = padding_value
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = num_mel_bins
SCREAMING_SNAKE_CASE_ = hop_length
SCREAMING_SNAKE_CASE_ = win_length
SCREAMING_SNAKE_CASE_ = win_function
SCREAMING_SNAKE_CASE_ = fmin
SCREAMING_SNAKE_CASE_ = fmax
SCREAMING_SNAKE_CASE_ = mel_floor
SCREAMING_SNAKE_CASE_ = return_attention_mask
def lowerCAmelCase_ ( self : Union[str, Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : str=False ):
def _flatten(_lowerCAmelCase : Dict ):
return list(itertools.chain(*_lowerCAmelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE_ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Optional[int]=False ):
if equal_length:
SCREAMING_SNAKE_CASE_ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = SpeechTaFeatureExtractor
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractionTester(self )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : int ):
self.assertTrue(np.all(np.mean(_lowerCAmelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowerCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase_ ( self : List[Any] ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE_ = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ = [None, 1_600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = range(800 , 1_400 , 200 )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE_ = ['longest', 'max_length', 'do_not_pad']
SCREAMING_SNAKE_CASE_ = [None, 1_600, None]
for max_length, padding in zip(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = feat_extract(_lowerCAmelCase , max_length=_lowerCAmelCase , padding=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_000 , padding='max_length' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=1_000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = feat_extract(
_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=2_000 , padding='longest' , return_tensors='np' )
SCREAMING_SNAKE_CASE_ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE_ = np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE_ = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowerCAmelCase_ ( self : Tuple ):
# Tests that all calls wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE_ = [np.asarray(_lowerCAmelCase ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE_ = feature_extractor(audio_target=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors='np' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE_ = np.asarray(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
SCREAMING_SNAKE_CASE_ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE_ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = self.feat_extract_dict
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.feature_extraction_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE_ = [len(_lowerCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE_ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE_ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE_ = min(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE_ = feat_extract.pad(
_lowerCAmelCase , padding='max_length' , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='np' )
self.assertIn('attention_mask' , _lowerCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Tuple ):
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE_ = ds.sort('id' ).select(range(_lowerCAmelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowerCAmelCase_ ( self : Any ):
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = feature_extractor(_lowerCAmelCase , return_tensors='pt' ).input_values
self.assertEqual(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , _lowerCAmelCase , atol=1E-6 ) )
def lowerCAmelCase_ ( self : Optional[int] ):
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
SCREAMING_SNAKE_CASE_ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE_ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = feature_extractor(audio_target=_lowerCAmelCase , return_tensors='pt' ).input_values
self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _lowerCAmelCase , atol=1E-4 ) ) | 31 | 1 |
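# A standalone version of the zero-mean/unit-variance check these tests apply
# to normalized feature sequences; the epsilon guards against zero variance.
import numpy as np

x = np.random.randn(10_000)
x = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
assert abs(x.mean()) < 1e-3 and abs(x.var() - 1) < 1e-3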
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase__ : str = 'T5Config'
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "mt5"
lowercase_ = MTaConfig
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "mt5"
lowercase_ = MTaConfig
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "mt5"
lowercase_ = MTaConfig | 31 |
from __future__ import annotations
from typing import TypedDict
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 42
lowercase_ = 42
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> list[str]:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('The parameter s type must be str.' )
return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )]
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> BWTTransformDict:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('The parameter s type must be str.' )
if not s:
raise ValueError('The parameter s must not be empty.' )
SCREAMING_SNAKE_CASE_ = all_rotations(__UpperCAmelCase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
SCREAMING_SNAKE_CASE_ = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__UpperCAmelCase ),
}
return response
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : int ) -> str:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('The parameter bwt_string type must be str.' )
if not bwt_string:
raise ValueError('The parameter bwt_string must not be empty.' )
try:
SCREAMING_SNAKE_CASE_ = int(__UpperCAmelCase )
except ValueError:
raise TypeError(
'The parameter idx_original_string type must be int or'
' castable to int.' )
if idx_original_string < 0:
raise ValueError('The parameter idx_original_string must not be lower than 0.' )
if idx_original_string >= len(__UpperCAmelCase ):
raise ValueError(
'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
SCREAMING_SNAKE_CASE_ = [''] * len(__UpperCAmelCase )
for _ in range(len(__UpperCAmelCase ) ):
for i in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = 'Provide a string that I will generate its BWT transform: '
lowerCamelCase__ : List[str] = input(entry_msg).strip()
lowerCamelCase__ : int = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
lowerCamelCase__ : Dict = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
) | 31 | 1 |
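# A round-trip usage sketch for the two transforms above, assuming they are
# exported under the names the __main__ block uses.
result = bwt_transform('banana')
assert reverse_bwt(result['bwt_string'], result['idx_original_string']) == 'banana'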
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCamelCase__ : Tuple = getLogger(__name__)
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : int = 8 , __UpperCAmelCase : int = 10_24 , __UpperCAmelCase : Tuple="val" , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Any=False , __UpperCAmelCase : List[Any]="summarization" , __UpperCAmelCase : Any=None , __UpperCAmelCase : Any=1 , __UpperCAmelCase : Dict = None , __UpperCAmelCase : Any="" , **__UpperCAmelCase : List[Any] , ) -> Dict:
SCREAMING_SNAKE_CASE_ = str(__UpperCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend='nccl' , rank=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = Path(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = save_dir.joinpath(f"rank_{local_rank}_output.json" )
torch.cuda.set_device(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ).cuda()
if fpaa:
SCREAMING_SNAKE_CASE_ = model.half()
# determine if we need to increase num_beams
use_task_specific_params(__UpperCAmelCase , __UpperCAmelCase ) # update config with task specific params
SCREAMING_SNAKE_CASE_ = generate_kwargs.pop('num_beams' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
SCREAMING_SNAKE_CASE_ = num_return_sequences
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
SCREAMING_SNAKE_CASE_ = tokenizer.model_max_length
if prefix is None:
SCREAMING_SNAKE_CASE_ = prefix or getattr(model.config , 'prefix' , '' ) or ''
SCREAMING_SNAKE_CASE_ = SeqaSeqDataset(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , max_target_length=10_24 , type_path=__UpperCAmelCase , n_obs=__UpperCAmelCase , prefix=__UpperCAmelCase , **__UpperCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
SCREAMING_SNAKE_CASE_ = ds.make_sortish_sampler(__UpperCAmelCase , distributed=__UpperCAmelCase , add_extra_examples=__UpperCAmelCase , shuffle=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = DataLoader(__UpperCAmelCase , sampler=__UpperCAmelCase , batch_size=__UpperCAmelCase , collate_fn=ds.collate_fn )
SCREAMING_SNAKE_CASE_ = []
for batch in tqdm(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = model.generate(
input_ids=batch['input_ids'].to(model.device ) , attention_mask=batch['attention_mask'].to(model.device ) , num_return_sequences=__UpperCAmelCase , num_beams=__UpperCAmelCase , **__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = batch['ids']
if num_return_sequences > 1:
SCREAMING_SNAKE_CASE_ = chunks(__UpperCAmelCase , __UpperCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(__UpperCAmelCase ):
results.append({'pred': pred, 'id': ids[i].item()} )
save_json(__UpperCAmelCase , __UpperCAmelCase )
return results, sampler.num_replicas
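# Entry point: parses CLI args, runs sharded generation on this process's rank,
# then on rank 0 gathers every rank's JSON shard, scores the predictions with
# BLEU or ROUGE, and writes a metrics JSON plus a *_generations.txt file.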
def UpperCAmelCase_ ( ) -> Dict:
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser(
epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
parser.add_argument('--data_dir' , type=__UpperCAmelCase , help='like cnn_dm/test.source' )
parser.add_argument(
'--model_name' , type=__UpperCAmelCase , help='like facebook/bart-large-cnn,t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
parser.add_argument('--save_dir' , type=__UpperCAmelCase , help='where to save' , default='tmp_gen' )
parser.add_argument('--max_source_length' , type=__UpperCAmelCase , default=__UpperCAmelCase )
parser.add_argument(
'--type_path' , type=__UpperCAmelCase , default='test' , help='which subset to evaluate typically train/val/test' )
parser.add_argument('--task' , type=__UpperCAmelCase , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=__UpperCAmelCase , default=8 , required=__UpperCAmelCase , help='batch size' )
parser.add_argument(
'--local_rank' , type=__UpperCAmelCase , default=-1 , required=__UpperCAmelCase , help='should be passed by distributed.launch' )
parser.add_argument(
'--n_obs' , type=__UpperCAmelCase , default=__UpperCAmelCase , required=__UpperCAmelCase , help='How many observations. Defaults to all.' )
parser.add_argument(
'--num_return_sequences' , type=__UpperCAmelCase , default=1 , required=__UpperCAmelCase , help='How many sequences to return' )
parser.add_argument(
'--sync_timeout' , type=__UpperCAmelCase , default=6_00 , required=__UpperCAmelCase , help='How long should master process wait for other processes to finish.' , )
parser.add_argument('--src_lang' , type=__UpperCAmelCase , default=__UpperCAmelCase , required=__UpperCAmelCase )
parser.add_argument('--tgt_lang' , type=__UpperCAmelCase , default=__UpperCAmelCase , required=__UpperCAmelCase )
parser.add_argument(
        '--prefix' , type=__UpperCAmelCase , required=__UpperCAmelCase , default=__UpperCAmelCase , help='will be added to the beginning of src examples' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--debug' , action='store_true' )
SCREAMING_SNAKE_CASE_ = time.time()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = parser.parse_known_args()
SCREAMING_SNAKE_CASE_ = parse_numeric_n_bool_cl_kwargs(__UpperCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(f"parsed the following generate kwargs: {generate_kwargs}" )
SCREAMING_SNAKE_CASE_ = Path(args.save_dir + '_tmp' )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase ) # this handles locking.
SCREAMING_SNAKE_CASE_ = list(json_save_dir.glob('rank_*.json' ) )
if intermediate_files:
raise ValueError(f"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
SCREAMING_SNAKE_CASE_ = {}
if args.src_lang is not None:
SCREAMING_SNAKE_CASE_ = args.src_lang
if args.tgt_lang is not None:
SCREAMING_SNAKE_CASE_ = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = eval_data_dir(
args.data_dir , __UpperCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__UpperCAmelCase , **__UpperCAmelCase , )
if args.local_rank <= 0:
SCREAMING_SNAKE_CASE_ = Path(args.save_dir )
save_dir.mkdir(exist_ok=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = gather_results_from_each_node(__UpperCAmelCase , __UpperCAmelCase , args.sync_timeout )
SCREAMING_SNAKE_CASE_ = combine_partial_results(__UpperCAmelCase )
if args.num_return_sequences > 1:
SCREAMING_SNAKE_CASE_ = save_dir.joinpath('pseudolabel_results.json' )
print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(__UpperCAmelCase , __UpperCAmelCase )
return
SCREAMING_SNAKE_CASE_ = Path(args.data_dir ).joinpath(args.type_path + '.target' )
with open(__UpperCAmelCase ) as f:
SCREAMING_SNAKE_CASE_ = [x.rstrip() for x in f.readlines()][: len(__UpperCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
SCREAMING_SNAKE_CASE_ = 'translation' in args.task
SCREAMING_SNAKE_CASE_ = calculate_bleu if calc_bleu else calculate_rouge
SCREAMING_SNAKE_CASE_ = 'bleu' if calc_bleu else 'rouge'
SCREAMING_SNAKE_CASE_ = score_fn(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = time.time() - start_time
SCREAMING_SNAKE_CASE_ = round(runtime / metrics['n_obs'] , 4 )
SCREAMING_SNAKE_CASE_ = num_replicas
# TODO(@stas00): add whatever metadata to metrics
SCREAMING_SNAKE_CASE_ = save_dir.joinpath(f"{args.type_path}_{metric_name}.json" )
save_json(__UpperCAmelCase , __UpperCAmelCase , indent=__UpperCAmelCase )
print(__UpperCAmelCase )
write_txt_file(__UpperCAmelCase , save_dir.joinpath(f"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(__UpperCAmelCase , save_dir.joinpath(f"{args.type_path}.target" ) )
else:
shutil.rmtree(__UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> List:
SCREAMING_SNAKE_CASE_ = []
for partial_result in partial_results:
records.extend(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["id"] )
SCREAMING_SNAKE_CASE_ = [x['pred'] for x in records]
return preds
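# Rank 0 polls save_dir until all num_replicas rank_*.json shards exist and
# parse cleanly (a JSONDecodeError means a shard is still being written),
# giving up once the timeout expires.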
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] ) -> List[Dict[str, List]]:
# WAIT FOR lots of .json files
SCREAMING_SNAKE_CASE_ = time.time()
logger.info('waiting for all nodes to finish' )
SCREAMING_SNAKE_CASE_ = None
while (time.time() - start_wait) < timeout:
SCREAMING_SNAKE_CASE_ = list(save_dir.glob('rank_*.json' ) )
if len(__UpperCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
SCREAMING_SNAKE_CASE_ = lmap(__UpperCAmelCase , __UpperCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('Rank 0 gave up on waiting for other processes' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate() | 31 |
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str ):
SCREAMING_SNAKE_CASE_ = {}
def lowerCAmelCase_ ( self : List[str] ):
print(self.vertex )
for i in self.vertex:
print(_lowerCAmelCase , ' -> ' , ' -> '.join([str(_lowerCAmelCase ) for j in self.vertex[i]] ) )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_lowerCAmelCase )
else:
# else make a new vertex
SCREAMING_SNAKE_CASE_ = [to_vertex]
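    # Starts a DFS from every unvisited vertex so disconnected components are
    # covered too. NOTE: `visited` is indexed by vertex label, which assumes
    # the vertices are numbered 0..n-1 (true for the example in __main__ below).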
def lowerCAmelCase_ ( self : Optional[Any] ):
# visited array for storing already visited nodes
SCREAMING_SNAKE_CASE_ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : list ):
# mark start vertex as visited
SCREAMING_SNAKE_CASE_ = True
print(_lowerCAmelCase , end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3 | 31 | 1 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] ):
return F"gaussian_noise_s={seed}_shape={'_'.join([str(_lowerCAmelCase ) for s in shape] )}.npy"
def lowerCAmelCase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : int=0 , _lowerCAmelCase : Dict=(4, 4, 64, 64) , _lowerCAmelCase : List[str]=False ):
SCREAMING_SNAKE_CASE_ = jnp.bfloataa if fpaa else jnp.floataa
SCREAMING_SNAKE_CASE_ = jnp.array(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) , dtype=_lowerCAmelCase )
return image
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Optional[Any]="CompVis/stable-diffusion-v1-4" ):
SCREAMING_SNAKE_CASE_ = jnp.bfloataa if fpaa else jnp.floataa
SCREAMING_SNAKE_CASE_ = 'bf16' if fpaa else None
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = FlaxUNetaDConditionModel.from_pretrained(
_lowerCAmelCase , subfolder='unet' , dtype=_lowerCAmelCase , revision=_lowerCAmelCase )
return model, params
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : Tuple=0 , _lowerCAmelCase : List[str]=(4, 77, 768) , _lowerCAmelCase : List[str]=False ):
SCREAMING_SNAKE_CASE_ = jnp.bfloataa if fpaa else jnp.floataa
SCREAMING_SNAKE_CASE_ = jnp.array(load_hf_numpy(self.get_file_format(_lowerCAmelCase , _lowerCAmelCase ) ) , dtype=_lowerCAmelCase )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.get_latents(_lowerCAmelCase , fpaa=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.get_encoder_hidden_states(_lowerCAmelCase , fpaa=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model.apply(
{'params': params} , _lowerCAmelCase , jnp.array(_lowerCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=_lowerCAmelCase , ).sample
assert sample.shape == latents.shape
SCREAMING_SNAKE_CASE_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = jnp.array(_lowerCAmelCase , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.get_latents(_lowerCAmelCase , shape=(4, 4, 96, 96) , fpaa=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.get_encoder_hidden_states(_lowerCAmelCase , shape=(4, 77, 1_024) , fpaa=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model.apply(
{'params': params} , _lowerCAmelCase , jnp.array(_lowerCAmelCase , dtype=jnp.intaa ) , encoder_hidden_states=_lowerCAmelCase , ).sample
assert sample.shape == latents.shape
SCREAMING_SNAKE_CASE_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = jnp.array(_lowerCAmelCase , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-2 ) | 31 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Tuple = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "funnel"
lowercase_ = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : int , _lowerCAmelCase : Optional[int]=30_522 , _lowerCAmelCase : List[str]=[4, 4, 4] , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : int=768 , _lowerCAmelCase : Optional[Any]=12 , _lowerCAmelCase : Optional[Any]=64 , _lowerCAmelCase : Optional[Any]=3_072 , _lowerCAmelCase : List[str]="gelu_new" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Dict=None , _lowerCAmelCase : str=1E-9 , _lowerCAmelCase : Any="mean" , _lowerCAmelCase : Union[str, Any]="relative_shift" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=True , **_lowerCAmelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = block_sizes
SCREAMING_SNAKE_CASE_ = [1] * len(_lowerCAmelCase ) if block_repeats is None else block_repeats
assert len(_lowerCAmelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
SCREAMING_SNAKE_CASE_ = num_decoder_layers
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = n_head
SCREAMING_SNAKE_CASE_ = d_head
SCREAMING_SNAKE_CASE_ = d_inner
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = initializer_std
SCREAMING_SNAKE_CASE_ = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
SCREAMING_SNAKE_CASE_ = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
SCREAMING_SNAKE_CASE_ = attention_type
SCREAMING_SNAKE_CASE_ = separate_cls
SCREAMING_SNAKE_CASE_ = truncate_seq
SCREAMING_SNAKE_CASE_ = pool_q_only
super().__init__(**_lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[Any] ):
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return len(self.block_sizes )
@num_blocks.setter
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any] ):
raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' ) | 31 | 1 |
def UpperCAmelCase_ ( __UpperCAmelCase : int = 10 , __UpperCAmelCase : int = 22 ) -> int:
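    # Counts positive integers that are both an n-digit number and an nth
    # power, i.e. len(str(base ** power)) == power (cf. Project Euler 63).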
SCREAMING_SNAKE_CASE_ = range(1 , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = range(1 , __UpperCAmelCase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''') | 31 |
from __future__ import annotations
from collections.abc import Iterator
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = value
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : int , _lowerCAmelCase : Node ):
SCREAMING_SNAKE_CASE_ = tree
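    # Recursively sums the values of every node in the (sub)tree; an empty
    # subtree contributes 0.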
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Node | None ):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Dict ):
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 1 |
from __future__ import annotations
from typing import TypedDict
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 42
lowercase_ = 42
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> list[str]:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('The parameter s type must be str.' )
return [s[i:] + s[:i] for i in range(len(__UpperCAmelCase ) )]
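# Burrows-Wheeler transform: sort all rotations of the input and keep the last
# character of each; idx_original_string records where the original string
# landed in the sorted table, so the transform is invertible.
# Example: bwt_transform("^BANANA") -> {'bwt_string': 'BNN^AAA', 'idx_original_string': 6}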
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> BWTTransformDict:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('The parameter s type must be str.' )
if not s:
raise ValueError('The parameter s must not be empty.' )
SCREAMING_SNAKE_CASE_ = all_rotations(__UpperCAmelCase )
    rotations.sort() # sort the list of rotations in alphabetical order
# make a string composed of the last char of each rotation
SCREAMING_SNAKE_CASE_ = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__UpperCAmelCase ),
}
return response
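# Inverse transform: prepend the BWT column to the table and re-sort, repeating
# len(bwt_string) times; the table then holds every rotation in sorted order,
# and row idx_original_string is the original string.
# Example: reverse_bwt("BNN^AAA", 6) -> "^BANANA"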
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : int ) -> str:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise TypeError('The parameter bwt_string type must be str.' )
if not bwt_string:
raise ValueError('The parameter bwt_string must not be empty.' )
try:
SCREAMING_SNAKE_CASE_ = int(__UpperCAmelCase )
except ValueError:
raise TypeError(
            'The parameter idx_original_string type must be int or'
            ' castable to int.' )
if idx_original_string < 0:
raise ValueError('The parameter idx_original_string must not be lower than 0.' )
if idx_original_string >= len(__UpperCAmelCase ):
raise ValueError(
'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
SCREAMING_SNAKE_CASE_ = [''] * len(__UpperCAmelCase )
for _ in range(len(__UpperCAmelCase ) ):
for i in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = 'Provide a string that I will generate its BWT transform: '
lowerCamelCase__ : List[str] = input(entry_msg).strip()
lowerCamelCase__ : int = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
lowerCamelCase__ : Dict = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
) | 31 |
def UpperCAmelCase_ ( __UpperCAmelCase : list , __UpperCAmelCase : int , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = 0 ) -> int:
SCREAMING_SNAKE_CASE_ = right or len(__UpperCAmelCase ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(__UpperCAmelCase , __UpperCAmelCase , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
SCREAMING_SNAKE_CASE_ = {
'input_ids': tf.convert_to_tensor([[0, 2_646, 10_269, 83, 99_942, 2]] , dtype=tf.intaa ), # "My dog is cute"
'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )['last_hidden_state']
SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , _lowerCAmelCase )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) ) | 31 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase__ : Optional[Any] = None
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ : List[str] = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ : Optional[Any] = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
lowerCamelCase__ : List[Any] = '▁'
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "token_type_ids"]
lowercase_ = FNetTokenizer
def __init__( self : List[Any] , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[Any]="<unk>" , _lowerCAmelCase : Optional[Any]="[SEP]" , _lowerCAmelCase : Optional[Any]="<pad>" , _lowerCAmelCase : Optional[int]="[CLS]" , _lowerCAmelCase : Optional[Any]="[MASK]" , **_lowerCAmelCase : Any , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE_ = (
AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase , normalized=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else mask_token
)
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
        SCREAMING_SNAKE_CASE_ = bool(self.vocab_file )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
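    # Token type ids follow the usual pair-encoding scheme: 0 for the first
    # sequence (including [CLS] and its [SEP]), 1 for the second sequence.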
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,) | 31 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = VQModel
lowercase_ = "sample"
@property
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : str=(32, 32) ):
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_lowerCAmelCase )
return {"sample": image}
@property
def lowerCAmelCase_ ( self : str ):
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self : List[str] ):
return (3, 32, 32)
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 3,
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[str] ):
pass
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = VQModel.from_pretrained('fusing/vqgan-dummy' )
model.to(_lowerCAmelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
SCREAMING_SNAKE_CASE_ = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
SCREAMING_SNAKE_CASE_ = image.to(_lowerCAmelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase ).sample
SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) ) | 31 |
from __future__ import annotations
from collections.abc import Generator
def UpperCAmelCase_ ( ) -> Generator[int, None, None]:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 2
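    # Incremental sieve of Eratosthenes: factor_map[x] holds a prime factor of
    # the composite x; a number absent from the map when reached is prime.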
while True:
SCREAMING_SNAKE_CASE_ = factor_map.pop(__UpperCAmelCase , __UpperCAmelCase )
if factor:
SCREAMING_SNAKE_CASE_ = factor + prime
while x in factor_map:
x += factor
SCREAMING_SNAKE_CASE_ = factor
else:
SCREAMING_SNAKE_CASE_ = prime
yield prime
prime += 1
def UpperCAmelCase_ ( __UpperCAmelCase : float = 1E10 ) -> int:
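    # For odd n, (p_n - 1)**n + (p_n + 1)**n == 2 * n * p_n (mod p_n**2), so we
    # search for the first odd n where 2 * n * p_n exceeds the limit; even n
    # give a remainder of 2 and are skipped below (cf. Project Euler 123).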
SCREAMING_SNAKE_CASE_ = sieve()
SCREAMING_SNAKE_CASE_ = 1
while True:
SCREAMING_SNAKE_CASE_ = next(__UpperCAmelCase )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime as the remainder will be 2.
next(__UpperCAmelCase )
n += 2
if __name__ == "__main__":
print(solution()) | 31 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE_ = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # "J'aime le camembert !"
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )['last_hidden_state']
SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _lowerCAmelCase )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) ) | 31 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase_ :
'''simple docstring'''
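    # Synthetic linear-regression dataset: y = a * x + b + Gaussian noise
    # (slope/intercept defaults 2 and 3), used to exercise training loops.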
def __init__( self : Any , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=64 , _lowerCAmelCase : List[str]=None ):
SCREAMING_SNAKE_CASE_ = np.random.default_rng(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = length
SCREAMING_SNAKE_CASE_ = rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Optional[int] ):
return self.length
def __getitem__( self : str , _lowerCAmelCase : Union[str, Any] ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : str=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_ = True
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any]=None ):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE_ = False
return x * self.a[0] + self.b[0]
class lowerCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Optional[Any]=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_ = True
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[int]=None ):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE_ = False
return x * self.a + self.b
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : int = 16 ) -> Union[str, Any]:
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE_ = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
SCREAMING_SNAKE_CASE_ = load_dataset('csv' , data_files=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = datasets['train'].unique('label' )
SCREAMING_SNAKE_CASE_ = {v: i for i, v in enumerate(__UpperCAmelCase )}
def tokenize_function(__UpperCAmelCase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' )
if "label" in examples:
SCREAMING_SNAKE_CASE_ = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_ = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(__UpperCAmelCase : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCAmelCase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(__UpperCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['train'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=2 )
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['validation'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader | 31 | 1 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Any ):
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase ) | 31 | 1 |
from __future__ import annotations
from collections import namedtuple
def UpperCAmelCase_ ( __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float ) -> tuple:
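    # Solves the electrical power identity P = V * I for whichever one of the
    # three quantities was passed as 0.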
SCREAMING_SNAKE_CASE_ = namedtuple('result' , 'name value' )
if (voltage, current, power).count(0 ) != 1:
        raise ValueError('Exactly one argument must be 0' )
elif power < 0:
raise ValueError(
'Power cannot be negative in any electrical/electronics system' )
elif voltage == 0:
return result('voltage' , power / current )
elif current == 0:
return result('current' , power / voltage )
elif power == 0:
return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 |
def UpperCAmelCase_ ( ) -> list[list[int]]:
return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
lowerCamelCase__ : List[Any] = generate_large_matrix()
lowerCamelCase__ : List[Any] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> None:
assert all(row == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for row in grid )
assert all(list(__UpperCAmelCase ) == sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ) for col in zip(*__UpperCAmelCase ) )
def UpperCAmelCase_ ( __UpperCAmelCase : list[int] ) -> int:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
SCREAMING_SNAKE_CASE_ = (left + right) // 2
SCREAMING_SNAKE_CASE_ = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
SCREAMING_SNAKE_CASE_ = mid + 1
else:
SCREAMING_SNAKE_CASE_ = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__UpperCAmelCase )
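# Because every row and every column is sorted in non-increasing order, the
# index of the first negative value can only move left (or stay) as we walk
# down the rows, so each row is binary-searched within the previous row's bound.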
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> int:
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(grid[0] )
for i in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ = find_negative_index(grid[i][:bound] )
total += bound
return (len(__UpperCAmelCase ) * len(grid[0] )) - total
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> int:
return len([number for row in grid for number in row if number < 0] )
def UpperCAmelCase_ ( __UpperCAmelCase : list[list[int]] ) -> int:
SCREAMING_SNAKE_CASE_ = 0
for row in grid:
for i, number in enumerate(__UpperCAmelCase ):
if number < 0:
total += len(__UpperCAmelCase ) - i
break
return total
def UpperCAmelCase_ ( ) -> None:
from timeit import timeit
print('Running benchmarks' )
SCREAMING_SNAKE_CASE_ = (
'from __main__ import count_negatives_binary_search, '
'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
SCREAMING_SNAKE_CASE_ = timeit(f"{func}(grid=grid)" , setup=__UpperCAmelCase , number=5_00 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 31 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Tuple: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def UpperCAmelCase_ ( ) -> str:
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
SCREAMING_SNAKE_CASE_ = [1, 2, 3]
with pytest.raises(__UpperCAmelCase ):
with parallel_backend('unsupported backend' ):
map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=2 )
with pytest.raises(__UpperCAmelCase ):
with parallel_backend('unsupported backend' ):
map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = [1, 2]
SCREAMING_SNAKE_CASE_ = {'a': 1, 'b': 2}
SCREAMING_SNAKE_CASE_ = {'a': [1, 2], 'b': [3, 4]}
SCREAMING_SNAKE_CASE_ = {'a': {'1': 1}, 'b': 2}
SCREAMING_SNAKE_CASE_ = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
SCREAMING_SNAKE_CASE_ = [2, 3]
SCREAMING_SNAKE_CASE_ = {'a': 2, 'b': 3}
SCREAMING_SNAKE_CASE_ = {'a': [2, 3], 'b': [4, 5]}
SCREAMING_SNAKE_CASE_ = {'a': {'1': 2}, 'b': 3}
SCREAMING_SNAKE_CASE_ = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa | 31 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Optional[int] = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Any = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
lowerCamelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 31 | 1 |
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ) -> Any:
SCREAMING_SNAKE_CASE_ = [1]
for i in range(2 , __UpperCAmelCase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
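    # Factorial number system: repeatedly divmod k by (n-1)!, (n-2)!, ... ; each
    # quotient selects the next element among those still remaining.
    # e.g. k = 0 yields the identity permutation [0, 1, ..., n-1].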
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = list(range(__UpperCAmelCase ) )
# Find permutation
while factorials:
SCREAMING_SNAKE_CASE_ = factorials.pop()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = divmod(__UpperCAmelCase , __UpperCAmelCase )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod() | 31 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class lowerCamelCase_ :
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return self.get_dummy_input()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : str=False , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Dict=False , ):
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 32
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = torch.device(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = (batch_size, num_channels) + sizes
SCREAMING_SNAKE_CASE_ = randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {'hidden_states': hidden_states}
if include_temb:
SCREAMING_SNAKE_CASE_ = 128
SCREAMING_SNAKE_CASE_ = randn_tensor((batch_size, temb_channels) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
if include_res_hidden_states_tuple:
SCREAMING_SNAKE_CASE_ = torch.manual_seed(1 )
SCREAMING_SNAKE_CASE_ = (randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=_lowerCAmelCase ),)
if include_encoder_hidden_states:
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, 32, 32) ).to(_lowerCAmelCase )
if include_skip_sample:
SCREAMING_SNAKE_CASE_ = randn_tensor(((batch_size, 3) + sizes) , generator=_lowerCAmelCase , device=_lowerCAmelCase )
return dummy_input
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
SCREAMING_SNAKE_CASE_ = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = self.block_class(**_lowerCAmelCase )
unet_block.to(_lowerCAmelCase )
unet_block.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = unet_block(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = output[0]
self.assertEqual(output.shape , self.output_shape )
SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:]
SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
assert torch_all_close(output_slice.flatten() , _lowerCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = self.block_class(**_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = output[0]
SCREAMING_SNAKE_CASE_ = torch.device(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = randn_tensor(output.shape , device=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(_lowerCAmelCase , _lowerCAmelCase )
loss.backward() | 31 | 1 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowerCamelCase__ : int = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
lowerCamelCase__ : int = concatenate_datasets
lowerCamelCase__ : List[Any] = DownloadConfig
lowerCamelCase__ : int = DownloadManager
lowerCamelCase__ : List[str] = DownloadMode
lowerCamelCase__ : List[str] = DownloadConfig
lowerCamelCase__ : int = DownloadMode
lowerCamelCase__ : List[Any] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 31 |
import operator as op
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Any:
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = lambda __UpperCAmelCase , __UpperCAmelCase : int(x / y ) # noqa: E731 integer division operation
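    # int(x / y) truncates toward zero, unlike floor division //, which matters
    # when exactly one operand is negative.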
SCREAMING_SNAKE_CASE_ = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' )
print('-' * (30 + len(__UpperCAmelCase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(__UpperCAmelCase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(__UpperCAmelCase ) , sep=' | ' )
else:
SCREAMING_SNAKE_CASE_ = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(__UpperCAmelCase ) , sep=' | ' )
SCREAMING_SNAKE_CASE_ = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(__UpperCAmelCase ) , sep=' | ' )
stack.append(
str(opr[x](int(__UpperCAmelCase ) , int(__UpperCAmelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(__UpperCAmelCase ) , sep=' | ' , )
return int(stack[0] )
if __name__ == "__main__":
lowerCamelCase__ : Tuple = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix)) | 31 | 1 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["transformers", "torch", "note_seq"]
def __init__( self : Tuple , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCAmelCase_ ( cls : List[Any] , *_lowerCAmelCase : Any , **_lowerCAmelCase : Union[str, Any] ):
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def lowerCAmelCase_ ( cls : List[str] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[Any] ):
requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) | 31 |
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> int:
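    # Sylvester's sequence: a(1) = 2 and a(n) = a(n-1)**2 - a(n-1) + 1; each
    # term equals the product of all previous terms plus one.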
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), f"The input value of [n={number}] is not an integer"
if number == 1:
return 2
elif number < 1:
SCREAMING_SNAKE_CASE_ = f"The input value of [n={number}] has to be > 0"
raise ValueError(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE_ = sylvester(number - 1 )
SCREAMING_SNAKE_CASE_ = num - 1
SCREAMING_SNAKE_CASE_ = num
return lower * upper + 1
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''') | 31 | 1 |
lowerCamelCase__ : int = tuple[float, float, float]
lowerCamelCase__ : Optional[Any] = tuple[float, float, float]
def UpperCAmelCase_ ( __UpperCAmelCase : Pointad , __UpperCAmelCase : Pointad ) -> Vectorad:
SCREAMING_SNAKE_CASE_ = end_pointa[0] - end_pointa[0]
SCREAMING_SNAKE_CASE_ = end_pointa[1] - end_pointa[1]
SCREAMING_SNAKE_CASE_ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def UpperCAmelCase_ ( __UpperCAmelCase : Vectorad , __UpperCAmelCase : Vectorad ) -> Vectorad:
SCREAMING_SNAKE_CASE_ = ab[1] * ac[2] - ab[2] * ac[1] # *i
SCREAMING_SNAKE_CASE_ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
SCREAMING_SNAKE_CASE_ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def UpperCAmelCase_ ( __UpperCAmelCase : Vectorad , __UpperCAmelCase : int ) -> bool:
return tuple(round(__UpperCAmelCase , __UpperCAmelCase ) for x in vector ) == (0, 0, 0)
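# Three points are collinear exactly when the cross product of AB and AC is the
# zero vector (after rounding each component to the given number of decimals).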
def UpperCAmelCase_ ( __UpperCAmelCase : Pointad , __UpperCAmelCase : Pointad , __UpperCAmelCase : Pointad , __UpperCAmelCase : int = 10 ) -> bool:
SCREAMING_SNAKE_CASE_ = create_vector(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = create_vector(__UpperCAmelCase , __UpperCAmelCase )
return is_zero_vector(get_ad_vectors_cross(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ) | 31 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCamelCase__ : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCamelCase__ : Union[str, Any] = TaTokenizerFast
lowerCamelCase__ : Dict = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCamelCase__ : int = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
) | 31 | 1 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCamelCase__ : Union[str, Any] = False
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : Optional[Any] = False
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
lowerCamelCase__ : Optional[Any] = parser.parse_args()
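    # Maps legacy UNet config keys to their current diffusers names.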
lowerCamelCase__ : List[Any] = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
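    # Maps legacy top-level state-dict prefixes to the current submodule names.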
lowerCamelCase__ : str = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
lowerCamelCase__ : str = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
lowerCamelCase__ : Optional[int] = reader.read()
lowerCamelCase__ : Optional[int] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
lowerCamelCase__ : Any = UNetaDModel(**config)
else:
lowerCamelCase__ : Optional[Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
lowerCamelCase__ : Optional[Any] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCamelCase__ : Optional[Any] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCamelCase__ : List[str] = config[key]
del config[key]
lowerCamelCase__ : str = [k.replace('UNetRes', '') for k in config['down_block_types']]
lowerCamelCase__ : List[str] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
lowerCamelCase__ : Optional[Any] = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
lowerCamelCase__ : Dict = {}
for param_key, param_value in state_dict.items():
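            # Legacy ".op" parameters are intentionally skipped during conversion.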
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
lowerCamelCase__ : int = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
lowerCamelCase__ : Dict = param_value
lowerCamelCase__ : str = True
if not has_changed:
lowerCamelCase__ : Dict = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder)) | 31 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
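# These tests spawn fresh Python subprocesses because TRANSFORMERS_OFFLINE must
# be set before `transformers` is imported; each subprocess monkey-patches
# socket.socket so that any attempted network access fails loudly.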
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@require_torch
def lowerCAmelCase_ ( self : int ):
        # This test is tricky: TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, which is too late from inside pytest, so we
        # change it while running an external program instead.
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
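    # Same scenario without TRANSFORMERS_OFFLINE: a socket.error emulates flaky
    # internet and the cached files must still be picked up.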
@require_torch
def lowerCAmelCase_ ( self : Tuple ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE_ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_lowerCAmelCase )
BertModel.from_pretrained(_lowerCAmelCase )
BertTokenizer.from_pretrained(_lowerCAmelCase )
pipeline(task='fill-mask' , model=_lowerCAmelCase )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def lowerCAmelCase_ ( self : List[str] ):
        # This test is tricky: TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, which is too late from inside pytest, so we
        # change it while running an external program instead.
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
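    # Without an explicit task, `pipeline(model=...)` needs the Hub to infer the
    # task, so it must fail with a clear error in offline mode.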
@require_torch
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import pipeline\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
SCREAMING_SNAKE_CASE_ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
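    # Models loaded with trust_remote_code=True must also work from the local
    # cache once TRANSFORMERS_OFFLINE=1 is set.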
@require_torch
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = '\nfrom transformers import AutoModel\n '
SCREAMING_SNAKE_CASE_ = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE_ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE_ = self.get_env()
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE_ = '1'
SCREAMING_SNAKE_CASE_ = subprocess.run(_lowerCAmelCase , env=_lowerCAmelCase , check=_lowerCAmelCase , capture_output=_lowerCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() ) | 31 | 1 |