import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
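
# Illustrative usage sketch (added; not part of the original module). It relies
# only on the public `transformers` API; `num_labels=150` is an example value
# (ADE20K has 150 semantic classes).
#
#     from transformers import SegformerConfig, SegformerForSemanticSegmentation
#
#     config = SegformerConfig(num_labels=150)
#     model = SegformerForSemanticSegmentation(config)  # randomly initialized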
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k: str) -> str:
    """Map a TF checkpoint key to the corresponding PyTorch state_dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration model and load the TF weights into it."""
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """Load every non-optimizer variable from a TF checkpoint into a name -> numpy array dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
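
# Illustrative invocation (added). The checkpoint path is an example that
# mirrors the default of get_tf_weights_as_numpy above:
#
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc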
import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-process a single (key, tensor) pair so it matches the PyTorch naming and layout."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
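
# For orientation (added; the shard count is illustrative): a successful run
# leaves the dump folder looking like
#
#     pytorch_model-00001-of-00072.bin
#     ...
#     pytorch_model.bin.index.json  # {"metadata": {"total_size": ...}, "weight_map": {...}}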
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
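
# Illustrative usage sketch (added; not part of the pipeline file). Loading a
# community pipeline through `custom_pipeline` is standard diffusers usage; the
# model ids and the audio sample used here are assumptions for the example.
#
#     import torch
#     from datasets import load_dataset
#     from diffusers import DiffusionPipeline
#     from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#     audio = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")[3]["audio"]
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5",
#         custom_pipeline="speech_to_image_diffusion",
#         speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#         speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#         torch_dtype=torch.float16,
#     ).to("cuda")
#     image = pipe(audio["array"], sampling_rate=audio["sampling_rate"]).images[0]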
""" Testing suite for the PyTorch Open-Llama model. """

import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
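
# To run this suite locally (illustrative; the path follows the transformers
# test layout that the relative imports above assume):
#
#     python -m pytest tests/models/open_llama/test_modeling_open_llama.py -v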
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
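
# Minimal wiring sketch (added; not from the original example). The trainer is
# built like a regular Seq2SeqTrainer plus the two extra arguments consumed
# above; `postprocess_qa_predictions` stands in for whatever decoding function
# the surrounding training script defines.
#
#     trainer = QuestionAnsweringSeq2SeqTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=postprocess_qa_predictions,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=30, num_beams=4)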
from __future__ import annotations


class Node:
    """A Node has a data variable and pointers to Nodes to its left and right."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    """Prints the tree's nodes with an in-order traversal."""
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    """Recursive function that returns the depth of a binary tree."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """Returns True if this is a full binary tree (every node has 0 or 2 children)."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """Greedy best-first grid search; returns the path and the action map."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """
    :param graph: 2D array where graph[i][j] is the weight of the edge i -> j
    :param v: number of vertices
    :return: the matrix of shortest distances between every pair of vertices
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertex, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """
        Runs through the files of `directory` that match `identifier` (and do not match
        `n_identifier`) and executes the doctests they contain.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_examples(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_examples(self):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
"""
Conversion of energy units.

Available units: joule, kilojoule, megajoule, gigajoule, wattsecond, watthour,
kilowatthour, newtonmeter, calorie_nutr, kilocalorie_nutr, electronvolt,
britishthermalunit_it, footpound
"""

ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """
    Convert `value` from one energy unit to another.

    >>> energy_conversion("joule", "joule", 1)
    1.0
    >>> energy_conversion("joule", "kilojoule", 1)
    0.001
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
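
# Worked example (added): 2.5 kilowatt-hours expressed in megajoules.
# 2.5 kWh = 2.5 * 3_600_000 J = 9_000_000 J = 9 MJ, so:
#
#     >>> energy_conversion("kilowatthour", "megajoule", 2.5)
#     9.0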
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """
    Naive recursion: count the ordered ways to pick items from `array`
    (with repetition) so that they sum to `target`.

    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """
    Top-down dynamic programming: memoize the recursion in `dp_array`.

    >>> combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    9
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """
    Bottom-up dynamic programming over all sub-targets from 0 to `target`.

    >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    9
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
def binary_and(a: int, b: int) -> str:
    """
    Take in 2 integers, convert them to binary, and return a binary number that
    is the result of a binary and operation on the integers provided.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> List[str]:
__snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase_) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0)
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def A ( mockfs : List[str] ) -> List[Any]:  # `mockfs` is a pytest fixture providing a mock filesystem
'''simple docstring'''
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__snake_case = 'index.faiss'
__snake_case = f"mock://{index_name}"
    index.save(path , storage_options=mockfs.storage_options )  # `path` is the mock:// URI built above
    __snake_case = FaissIndex.load(path , storage_options=mockfs.storage_options )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=lowercase_)
            mocked_bulk.return_value = [(True, None)] * 3  # assign the mock's return value (calling it would be a no-op)
index.add_documents(['foo', 'bar', 'foobar'])
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
| 676 | 0 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
class __lowercase :
def __init__( self , lowercase_ , lowercase_) -> Union[str, Any]:
__snake_case = question_encoder
__snake_case = generator
__snake_case = self.question_encoder
def _a ( self , lowercase_) -> List[str]:
if os.path.isfile(lowercase_):
raise ValueError(F"Provided path ({save_directory}) should be a directory, not a file")
os.makedirs(lowercase_ , exist_ok=lowercase_)
__snake_case = os.path.join(lowercase_ , 'question_encoder_tokenizer')
__snake_case = os.path.join(lowercase_ , 'generator_tokenizer')
self.question_encoder.save_pretrained(lowercase_)
self.generator.save_pretrained(lowercase_)
@classmethod
def _a ( cls , lowercase_ , **lowercase_) -> List[Any]:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
__snake_case = kwargs.pop('config' , lowercase_)
if config is None:
__snake_case = RagConfig.from_pretrained(lowercase_)
__snake_case = AutoTokenizer.from_pretrained(
lowercase_ , config=config.question_encoder , subfolder='question_encoder_tokenizer')
__snake_case = AutoTokenizer.from_pretrained(
lowercase_ , config=config.generator , subfolder='generator_tokenizer')
return cls(question_encoder=lowercase_ , generator=lowercase_)
def __call__( self , *lowercase_ , **lowercase_) -> Tuple:
return self.current_tokenizer(*lowercase_ , **lowercase_)
def _a ( self , *lowercase_ , **lowercase_) -> str:
return self.generator.batch_decode(*lowercase_ , **lowercase_)
def _a ( self , *lowercase_ , **lowercase_) -> List[str]:
return self.generator.decode(*lowercase_ , **lowercase_)
def _a ( self) -> int:
__snake_case = self.question_encoder
def _a ( self) -> Optional[int]:
__snake_case = self.generator
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = "longest" , lowercase_ = None , lowercase_ = True , **lowercase_ , ) -> BatchEncoding:
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , lowercase_ , )
if max_length is None:
__snake_case = self.current_tokenizer.model_max_length
__snake_case = self(
lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , max_length=lowercase_ , padding=lowercase_ , truncation=lowercase_ , **lowercase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__snake_case = self.current_tokenizer.model_max_length
__snake_case = self(
text_target=lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , **lowercase_ , )
__snake_case = labels['input_ids']
return model_inputs
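# A minimal usage sketch (assumed names: `RagTokenizer` is the public alias of the
# class above, and "facebook/rag-token-base" is just an illustrative checkpoint):
#
#   from transformers import RagTokenizer
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   inputs = tokenizer("who wrote hamlet?", return_tensors="pt")  # uses the question-encoder tokenizer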
| 706 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = args.log_outputs
__snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
__snake_case = load_metric('wer' )
__snake_case = load_metric('cer' )
# compute metrics
__snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] )
__snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
__snake_case = f"WER: {wer_result}\nCER: {cer_result}"
print(snake_case__ )
with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
f.write(snake_case__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__snake_case = f"log_{dataset_id}_predictions.txt"
__snake_case = f"log_{dataset_id}_targets.txt"
with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t:
# mapping function to write output
def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
p.write(f"{i}" + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f"{i}" + '\n' )
t.write(batch['target'] + '\n' )
result.map(snake_case__ , with_indices=snake_case__ )
def A ( snake_case__ : str ) -> str:
'''simple docstring'''
__snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__snake_case = re.sub(snake_case__ , '' , text.lower() )
    # In addition, we can normalize the target text, e.g. by removing newline characters etc.
    # note that the order is important here!
__snake_case = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
__snake_case = ' '.join(text.split(snake_case__ ) )
return text
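# A worked example (illustrative, not part of the original script):
# normalize_text('Hello, World!\n\nBye.') -> 'hello world bye'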
def A ( snake_case__ : int ) -> Optional[int]:
'''simple docstring'''
# load dataset
__snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
__snake_case = AutoFeatureExtractor.from_pretrained(args.model_id )
__snake_case = feature_extractor.sampling_rate
# resample audio
__snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) )
# load eval pipeline
if args.device is None:
__snake_case = 0 if torch.cuda.is_available() else -1
__snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case__ : Optional[Any] ):
__snake_case = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__snake_case = prediction['text']
__snake_case = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
__snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case__ , snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
| 676 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : Any = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''rwkv'''
__UpperCAmelCase = {'''max_position_embeddings''': '''context_length'''}
def __init__( self , lowercase_=5_0_2_7_7 , lowercase_=1_0_2_4 , lowercase_=4_0_9_6 , lowercase_=3_2 , lowercase_=None , lowercase_=None , lowercase_=1e-5 , lowercase_=0 , lowercase_=0 , lowercase_=6 , lowercase_=False , lowercase_=True , **lowercase_ , ) -> Optional[Any]:
__snake_case = vocab_size
__snake_case = context_length
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = attention_hidden_size if attention_hidden_size is not None else hidden_size
__snake_case = intermediate_size if intermediate_size is not None else 4 * hidden_size
__snake_case = layer_norm_epsilon
__snake_case = rescale_every
__snake_case = use_cache
__snake_case = bos_token_id
__snake_case = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
| 707 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you can try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def A ( *snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
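    # an exclusive flock makes concurrent prints from multiple ranks atomic,
    # so lines from different GPUs don't interleave in the shared output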
with open(snake_case__ , 'r' ) as fh:
fcntl.flock(snake_case__ , fcntl.LOCK_EX )
try:
print(*snake_case__ )
finally:
fcntl.flock(snake_case__ , fcntl.LOCK_UN )
UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
UpperCAmelCase__ : Any = torch.device("cuda", local_rank)
UpperCAmelCase__ : Union[str, Any] = socket.gethostname()
UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
UpperCAmelCase__ : Optional[int] = dist.get_rank()
UpperCAmelCase__ : List[str] = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 676 | 0 |
def A ( snake_case__ : str , snake_case__ : str ) -> str:
'''simple docstring'''
__snake_case = len(snake_case__ )
__snake_case = len(snake_case__ )
__snake_case = (
first_str_length if first_str_length > second_str_length else second_str_length
)
__snake_case = []
for char_count in range(snake_case__ ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(snake_case__ )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
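    # prints "AXBYZ": characters of the two inputs interleaved, with the leftover "Z" appended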
| 708 |
from datetime import datetime
import requests
def A ( snake_case__ : str ) -> bytes:
'''simple docstring'''
__snake_case = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
__snake_case = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(snake_case__ ).content
if __name__ == "__main__":
UpperCAmelCase__ : Dict = input("Enter Video/IGTV url: ").strip()
UpperCAmelCase__ : Optional[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 0 |
'''simple docstring'''
def A ( snake_case__ : float ) -> float:
'''simple docstring'''
return 10 - x * x
def A ( snake_case__ : float , snake_case__ : float ) -> float:
'''simple docstring'''
# Bolzano theory in order to find if there is a root between a and b
if equation(snake_case__ ) * equation(snake_case__ ) >= 0:
raise ValueError('Wrong space!' )
__snake_case = a
while (b - a) >= 0.01:
# Find middle point
__snake_case = (a + b) / 2
# Check if middle point is root
if equation(snake_case__ ) == 0.0:
break
# Decide the side to repeat the steps
if equation(snake_case__ ) * equation(snake_case__ ) < 0:
__snake_case = c
else:
__snake_case = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
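    # both calls converge on the positive root of 10 - x*x, i.e. sqrt(10) ~= 3.162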
| 709 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowercase :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def _a ( self) -> Union[str, Any]:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = OpenLlamaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
def _a ( self) -> Optional[Any]:
__snake_case = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> Tuple:
__snake_case = OpenLlamaModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self) -> int:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'single_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'multi_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _a ( self) -> List[Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def _a ( self , lowercase_) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = ids_tensor([1, 1_0] , config.vocab_size)
__snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = OpenLlamaModel(lowercase_)
original_model.to(lowercase_)
original_model.eval()
__snake_case = original_model(lowercase_).last_hidden_state
__snake_case = original_model(lowercase_).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = {'type': scaling_type, 'factor': 10.0}
__snake_case = OpenLlamaModel(lowercase_)
scaled_model.to(lowercase_)
scaled_model.eval()
__snake_case = scaled_model(lowercase_).last_hidden_state
__snake_case = scaled_model(lowercase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
| 676 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you can try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def A ( *snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(snake_case__ , 'r' ) as fh:
fcntl.flock(snake_case__ , fcntl.LOCK_EX )
try:
print(*snake_case__ )
finally:
fcntl.flock(snake_case__ , fcntl.LOCK_UN )
UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
UpperCAmelCase__ : Any = torch.device("cuda", local_rank)
UpperCAmelCase__ : Union[str, Any] = socket.gethostname()
UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
UpperCAmelCase__ : Optional[int] = dist.get_rank()
UpperCAmelCase__ : List[str] = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 710 |
def A ( snake_case__ : int ) -> bool:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
__snake_case = f"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if number < 0:
return False
__snake_case = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
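# Worked example: 25 * 25 = 625, which ends in 25, so the function returns True
# (25 is an automorphic number); 7 * 7 = 49 does not end in 7, so 7 returns False.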
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ : Any = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : str = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
import numpy as np
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(snake_case__ )
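# Quick sanity check (illustrative): sigmoid(0) == 0.5, so the swish/SiLU value at 0 is
# 0 * 0.5 == 0, i.e. sigmoid_linear_unit(np.array([0.0])) -> array([0.])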
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 712 |
def A ( snake_case__ : int ) -> bool:
'''simple docstring'''
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
__snake_case = 4
__snake_case = (1 << p) - 1
for _ in range(p - 2 ):
__snake_case = ((s * s) - 2) % m
return s == 0
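# Worked example: for p = 7, m = 2**7 - 1 = 127 and the sequence runs
# 4, 14, 67, 42, 111, 0 (mod 127); it ends in 0, so 127 is a Mersenne prime.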
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 676 | 0 |
from maths.prime_check import is_prime
def A ( snake_case__ : int ) -> int:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
__snake_case = f"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if is_prime(snake_case__ ) and is_prime(number + 2 ):
return number + 2
else:
return -1
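# Worked example: 5 and 7 are both prime, so the function returns 7 for an input of 5;
# 4 is not prime, so it returns -1.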
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : Optional[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 0 |
from __future__ import annotations
UpperCAmelCase__ : Optional[int] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class __lowercase :
def __init__( self , lowercase_ , lowercase_) -> None:
__snake_case = graph
# mapping node to its parent in resulting breadth first tree
__snake_case = {}
__snake_case = source_vertex
def _a ( self) -> None:
__snake_case = {self.source_vertex}
__snake_case = None
__snake_case = [self.source_vertex] # first in first out queue
while queue:
__snake_case = queue.pop(0)
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowercase_)
__snake_case = vertex
queue.append(lowercase_)
def _a ( self , lowercase_) -> str:
if target_vertex == self.source_vertex:
return self.source_vertex
__snake_case = self.parent.get(lowercase_)
if target_vertex_parent is None:
__snake_case = (
F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(lowercase_)
return self.shortest_path(lowercase_) + F"->{target_vertex}"
if __name__ == "__main__":
UpperCAmelCase__ : Dict = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 715 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def A ( snake_case__ : List[Any] ) -> Any:
'''simple docstring'''
__snake_case = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__snake_case = 4
__snake_case = 48
__snake_case = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = [6, 6, 6, 6]
__snake_case = 60
__snake_case = [6, 6, 6, 6]
__snake_case = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = 4
__snake_case = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__snake_case = 1
__snake_case = 1
__snake_case = 126
__snake_case = 7
__snake_case = 255.0
__snake_case = ''
return config
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
__snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__snake_case = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__snake_case = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "conv_first" in name:
__snake_case = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__snake_case = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__snake_case = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__snake_case = name.replace('upsample.2' , 'upsample.convolution_1' )
__snake_case = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__snake_case = 'swin2sr.' + name
return name
def A ( snake_case__ : str , snake_case__ : List[Any] ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[4] )
__snake_case = config.embed_dim
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
pass
else:
__snake_case = val
return orig_state_dict
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int ) -> Tuple:
'''simple docstring'''
__snake_case = get_config(snake_case__ )
__snake_case = SwinaSRForImageSuperResolution(snake_case__ )
model.eval()
__snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )
__snake_case = convert_state_dict(snake_case__ , snake_case__ )
__snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
__snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
__snake_case = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__snake_case = 126 if 'Jpeg' in checkpoint_url else 256
__snake_case = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case = transforms(snake_case__ ).unsqueeze(0 )
if config.num_channels == 1:
__snake_case = pixel_values[:, 0, :, :].unsqueeze(1 )
__snake_case = model(snake_case__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 )
print('Looks ok!' )
__snake_case = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__snake_case = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase__ : Dict = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def A ( snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
__snake_case = BertConfig.from_json_file(snake_case__ )
print(f"Building PyTorch model from configuration: {config}" )
__snake_case = BertForPreTraining(snake_case__ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 717 |
from __future__ import annotations
class __lowercase :
def __init__( self , lowercase_) -> None:
__snake_case = data
__snake_case = None
__snake_case = None
def A ( snake_case__ : Node | None ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def A ( snake_case__ : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def A ( snake_case__ : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def A ( ) -> None: # Main function for testing.
'''simple docstring'''
__snake_case = Node(1 )
__snake_case = Node(2 )
__snake_case = Node(3 )
__snake_case = Node(4 )
__snake_case = Node(5 )
__snake_case = Node(6 )
__snake_case = Node(7 )
__snake_case = Node(8 )
__snake_case = Node(9 )
print(is_full_binary_tree(snake_case__ ) )
print(depth_of_tree(snake_case__ ) )
print('Tree is: ' )
display(snake_case__ )
if __name__ == "__main__":
main()
| 676 | 0 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
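#
# For illustration only (this snippet is not part of the tool), the same expansion
# can be reproduced with itertools; `dims` is a made-up stand-in for the parsed
# --variations input:
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["--fp16 0", "--fp16 1", "--bf16 1"]]
#   variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#   # -> ['--tf32 0 --fp16 0', '--tf32 0 --fp16 1', ..., '--tf32 1 --bf16 1']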
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
        sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, 'a')

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r'^.*\r', '', msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely, wrapped for `max_width` chars.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ['CUDA_VISIBLE_DEVICES']
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('/')[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ''
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r'[\\\n]+', ' ', args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r'--output_dir\s+[^\s]+', '', args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r'--overwrite_output_dir\s+', '', args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print('STDOUT', result.stdout)
        print('STDERR', result.stderr)

    # save the streams
    prefix = variation.replace(' ', '-')
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", 'w') as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", 'w') as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print('failed')
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", 'r', encoding='utf-8') as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions() -> str:
    properties = torch.cuda.get_device_properties(torch.device('cuda'))
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = 'variation'
    diff_key = 'diff_%'

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis='columns',
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis='columns')  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis='columns')

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace('_', '<br>'), axis='columns')
    df_console = df.rename(lambda c: c.replace('_', '\n'), axis='columns')

    report = ['', 'Copy between the cut-here-lines and paste as is to github or a forum']
    report += ['----------8<-----------------8<--------']
    report += ['*** Results:', df_github.to_markdown(index=False, floatfmt='.2f')]
    report += ['```']
    report += ['*** Setup:', get_versions()]
    report += ['*** The benchmark command line was:', get_original_command()]
    report += ['```']
    report += ['----------8<-----------------8<--------']
    report += ['*** Results (console):', df_console.to_markdown(index=False, floatfmt='.2f')]

    print('\n\n'.join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--base-cmd', default=None, type=str, required=True, help='Base cmd')
    parser.add_argument('--variations', default=None, type=str, nargs='+', required=True,
                        help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'')
    parser.add_argument('--base-variation', default=None, type=str,
                        help='Baseline variation to compare to. if None the minimal target value will be used to compare against')
    parser.add_argument('--target-metric-key', default=None, type=str, required=True,
                        help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second')
    parser.add_argument('--report-metric-keys', default='', type=str,
                        help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples\'')
    parser.add_argument('--repeat-times', default=1, type=int,
                        help='How many times to re-run each variation - an average will be reported')
    parser.add_argument('--output_dir', default='output_benchmark', type=str,
                        help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked')
    parser.add_argument('--verbose', default=False, action='store_true',
                        help='Whether to show the outputs of each run or just the benchmark progress')
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r'\|', x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(' '.join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = 'variation'
    results = []
    for id, variation in enumerate(tqdm(variations, desc='Total completion: ', leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function='relu', d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type='sine', backbone='resnet50', use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
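# Usage sketch (illustrative only): the attribute_map above lets the generic names
# resolve to the DETR-style fields on a default config.
#
#   config = TableTransformerConfig()
#   assert config.hidden_size == config.d_model == 256
#   assert config.num_attention_heads == config.encoder_attention_heads == 8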
| 676 | 0 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), '(0,0,0,0,0,1)')

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), '(3.0,6.0,9.0)')
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count('0'), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), '(0,1,0)')

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), '(3,4,7)')

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), '(0,1,0)')

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual('(14,32,50)', str(a * x))
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n', str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n', str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n', str(a + b))

    def test__sub__matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n', str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n', str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
| 719 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Return number + 2 if (number, number + 2) is a twin-prime pair, else -1.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = 'x = 3'
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3})

        code = 'x = y'
        state = {'y': 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 5, 'y': 5})

    def test_evaluate_call(self):
        code = 'y = add_two(x)'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'y': 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = 'x = 3'
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3})

    def test_evaluate_dict(self):
        code = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        self.assertDictEqual(result, {'x': 3, 'y': 5})
        self.assertDictEqual(state, {'x': 3, 'test_dict': {'x': 3, 'y': 5}})

    def test_evaluate_expression(self):
        code = 'x = 3\ny = 5'
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'y': 5})

    def test_evaluate_f_string(self):
        code = 'text = f\'This is x: {x}.\''
        state = {'x': 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {'x': 3, 'text': 'This is x: 3.'})

    def test_evaluate_if(self):
        code = 'if x <= 3:\n    y = 2\nelse:\n    y = 5'
        state = {'x': 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {'x': 3, 'y': 2})

        state = {'x': 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {'x': 8, 'y': 5})

    def test_evaluate_list(self):
        code = 'test_list = [x, add_two(x)]'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {'x': 3, 'test_list': [3, 5]})

    def test_evaluate_name(self):
        code = 'y = x'
        state = {'x': 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {'x': 3, 'y': 3})

    def test_evaluate_subscript(self):
        code = 'test_list = [x, add_two(x)]\ntest_list[1]'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'test_list': [3, 5]})

        code = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
        state = {'x': 3}
        result = evaluate(code, {'add_two': add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {'x': 3, 'test_dict': {'x': 3, 'y': 5}})

    def test_evaluate_for(self):
        code = 'x = 0\nfor i in range(3):\n    x = i'
        state = {}
        result = evaluate(code, {'range': range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {'x': 2, 'i': 2})
| 720 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 676 | 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }

    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes)
    logger.info(PREFIX + 'Node ID        : %i' % params.node_id)
    logger.info(PREFIX + 'Local rank     : %i' % params.local_rank)
    logger.info(PREFIX + 'World size     : %i' % params.world_size)
    logger.info(PREFIX + 'GPUs per node  : %i' % params.n_gpu_per_node)
    logger.info(PREFIX + 'Master         : %s' % str(params.is_master))
    logger.info(PREFIX + 'Multi-node     : %s' % str(params.multi_node))
    logger.info(PREFIX + 'Multi-GPU      : %s' % str(params.multi_gpu))
    logger.info(PREFIX + 'Hostname       : %s' % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed')
        torch.distributed.init_process_group(
            init_method='env://',
            backend='nccl',
        )
def set_seed(args):
    """
    Set the random seed.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
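# Typical call order in a training entry point (sketch; `args` is assumed to carry
# the parsed hyper-parameters, including `seed`, `n_gpu` and a `dump_path`):
#
#   init_gpu_params(args)
#   set_seed(args)
#   if args.is_master:
#       git_log(args.dump_path)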
| 721 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1])
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith('bias') and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
__snake_case = tf.train.list_variables(snake_case__ )
__snake_case = {}
__snake_case = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
__snake_case = any(pat in name for pat in ignore_name )
if skip_key:
continue
__snake_case = tf.train.load_variable(snake_case__ , snake_case__ )
__snake_case = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus', model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight')
    sd.pop('model.encoder.embed_positions.weight')
    torch.save(sd, Path(save_dir) / 'pytorch_model.bin')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
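    # Example invocation (the checkpoint path is a placeholder):
    #   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc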
| 676 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self) -> Any:
return self.config.num_train_timesteps
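# Minimal sampling sketch (illustrative only; `model` is a placeholder score-estimating
# UNet and the tensor shape is arbitrary). Each timestep runs `correct_steps` corrector
# updates followed by one predictor step:
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   scheduler.set_sigmas(num_inference_steps=1000)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for i, t in enumerate(scheduler.timesteps):
#       sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])
#       for _ in range(scheduler.config.correct_steps):
#           model_output = model(sample, sigma_t).sample
#           sample = scheduler.step_correct(model_output, sample).prev_sample
#       model_output = model(sample, sigma_t).sample
#       sample = scheduler.step_pred(model_output, t, sample).prev_sample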
| 700 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors='pt', sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt',
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    ' the batch size of `prompt`.')
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt',
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
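# Usage sketch (illustrative; the model ids and the audio source are assumptions,
# not requirements of this file):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
#   audio_sample = ds[3]["audio"]
#   pipe = SpeechToImagePipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5",
#       speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#       speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#   ).to("cuda")
#   image = pipe(audio_sample["array"], sampling_rate=audio_sample["sampling_rate"]).images[0]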
| 676 | 0 |
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    # transpose the rows of source data into per-column lists of floats
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    # sum the per-column scores into one final score per row
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """
    weights - int list, possible values 0 / 1:
    0 if lower values have higher weight in the data set,
    1 if higher values have higher weight in the data set.
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
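# Example (input and expected output follow the function's own normalization rules):
#
#   vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#   procentual_proximity(vehicles, [0, 0, 1])
#   # -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]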
| 701 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs['max_length'] = (
            gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
        )
        gen_kwargs['num_beams'] = (
            gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ))
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, 'predict')
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
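# Wiring sketch (argument names are illustrative): `post_process_function` maps raw
# generations back to answer strings before `compute_metrics` scores them.
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       tokenizer=tokenizer,
#       data_collator=data_collator,
#       compute_metrics=compute_metrics,
#       post_process_function=post_processing_function,
#   )
#   metrics = trainer.evaluate(max_length=max_answer_length, num_beams=num_beams)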
| 676 | 0 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
if hasattr(scheduler.config , 'steps_offset') and scheduler.config.steps_offset != 1:
__snake_case = (
F"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
F" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                'to update the config accordingly as leaving `steps_offset` might lead to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , lowercase_ , standard_warn=lowercase_)
__snake_case = dict(scheduler.config)
__snake_case = 1
__snake_case = FrozenDict(lowercase_)
if hasattr(scheduler.config , 'skip_prk_steps') and scheduler.config.skip_prk_steps is False:
__snake_case = (
F"The configuration file of this scheduler: {scheduler} has not set the configuration"
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , lowercase_ , standard_warn=lowercase_)
__snake_case = dict(scheduler.config)
__snake_case = True
__snake_case = FrozenDict(lowercase_)
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
segmentation_model=lowercase_ , segmentation_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Dict:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Union[str, Any]:
self.enable_attention_slicing(lowercase_)
def _a ( self) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`')
__snake_case = torch.device('cuda')
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _a ( self) -> Union[str, Any]:
if self.device != torch.device('meta') or not hasattr(self.unet , '_hf_hook'):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook')
and hasattr(module._hf_hook , 'execution_device')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt').to(self.device)
__snake_case = self.segmentation_model(**lowercase_)
__snake_case = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
__snake_case = self.numpy_to_pil(lowercase_)[0].resize(image.size)
# Run inpainting pipeline with the generated mask
__snake_case = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , )
| 702 |
from __future__ import annotations
UpperCAmelCase__ : Dict = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def A ( snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
__snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the reference grid
__snake_case = 1
__snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the action grid
__snake_case = init[0]
__snake_case = init[1]
__snake_case = 0
__snake_case = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case = [[f, g, x, y]]
__snake_case = False # flag that is set when search is complete
__snake_case = False # flag set if we can't find expand
while not found and not resign:
if len(snake_case__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case = cell.pop()
__snake_case = next_cell[2]
__snake_case = next_cell[3]
__snake_case = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case = True
else:
for i in range(len(snake_case__ ) ): # to try out different valid actions
__snake_case = x + DIRECTIONS[i][0]
__snake_case = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case = g + cost
__snake_case = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case = 1
__snake_case = i
__snake_case = []
__snake_case = goal[0]
__snake_case = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case = x - DIRECTIONS[action[x][y]][0]
__snake_case = y - DIRECTIONS[action[x][y]][1]
__snake_case = xa
__snake_case = ya
invpath.append([x, y] )
__snake_case = []
for i in range(len(snake_case__ ) ):
path.append(invpath[len(snake_case__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCAmelCase__ : str = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCAmelCase__ : int = [0, 0]
# all coordinates are given in format [y,x]
UpperCAmelCase__ : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCAmelCase__ : Optional[Any] = 1
# the cost map which pushes the path closer to the goal
UpperCAmelCase__ : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCAmelCase__ : Tuple = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCAmelCase__ : Optional[int] = 99
UpperCAmelCase__ , UpperCAmelCase__ : str = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 676 | 0 |
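For readability, here is a compact sketch of the same grid search with descriptive names. It mirrors the snippet above (f = g + h with a Manhattan heuristic, 4-neighbour moves, and cells closed as soon as they are generated); the function name and the demo grid are illustrative, not part of the original.

import heapq

def grid_search(grid, start, goal, cost=1):
    # Best-first search with f = g + h, where h = Manhattan distance to the goal.
    def h(x, y):
        return abs(x - goal[0]) + abs(y - goal[1])

    open_heap = [(h(*start), 0, tuple(start))]
    came_from = {}
    seen = {tuple(start)}
    while open_heap:
        _f, g, (x, y) = heapq.heappop(open_heap)
        if (x, y) == tuple(goal):
            path = [(x, y)]
            while (x, y) in came_from:
                x, y = came_from[(x, y)]
                path.append((x, y))
            return path[::-1]
        for dx, dy in ((-1, 0), (0, -1), (1, 0), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < len(grid) and 0 <= ny < len(grid[0]) and grid[nx][ny] == 0 and (nx, ny) not in seen:
                seen.add((nx, ny))            # closed on generation, like the snippet
                came_from[(nx, ny)] = (x, y)
                heapq.heappush(open_heap, (g + cost + h(nx, ny), g + cost, (nx, ny)))
    raise ValueError("Algorithm is unable to find solution")

print(grid_search([[0, 1, 0], [0, 1, 0], [0, 0, 0]], (0, 0), (0, 2)))  # a path around the wall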
UpperCAmelCase__ : List[Any] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 703 |
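For orientation, a hedged sketch of how a name-to-specifier table like the one above is typically consumed; the deps_list helper below is hypothetical, shown only to illustrate resolving bare names to their pins:

deps = {
    "numpy": "numpy>=1.17",
    "tqdm": "tqdm>=4.27",
}

def deps_list(*pkgs):
    # Resolve bare package names to pinned requirement strings;
    # a KeyError flags any name missing from the table.
    return [deps[p] for p in pkgs]

print(deps_list("numpy", "tqdm"))  # ['numpy>=1.17', 'tqdm>=4.27']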
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
| 676 | 0 |
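A self-contained sketch of the two doctest entry points the suite above drives: docstring examples are collected from a module and executed, and a file-based analogue exists for standalone documents.

import doctest

def add(a, b):
    """
    >>> add(2, 3)
    5
    """
    return a + b

if __name__ == "__main__":
    results = doctest.testmod()  # runs every docstring example in this module
    assert results.failed == 0
    # File-based analogue: doctest.testfile("example.txt", optionflags=doctest.ELLIPSIS)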
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = ['''pixel_values''']
def __init__( self , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = 8 , **lowercase_ , ) -> None:
super().__init__(**lowercase_)
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_pad
__snake_case = pad_size
def _a ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ = None) -> List[str]:
__snake_case , __snake_case = get_image_size(lowercase_)
__snake_case = (old_height // size + 1) * size - old_height
__snake_case = (old_width // size + 1) * size - old_width
return pad(lowercase_ , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase_)
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> int:
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_pad if do_pad is not None else self.do_pad
__snake_case = pad_size if pad_size is not None else self.pad_size
__snake_case = make_list_of_images(lowercase_)
if not valid_images(lowercase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(lowercase_) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=lowercase_ , scale=lowercase_) for image in images]
if do_pad:
__snake_case = [self.pad(lowercase_ , size=lowercase_) for image in images]
__snake_case = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images]
__snake_case = {'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
| 704 |
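A numpy-only illustration of the symmetric padding rule above: each spatial dimension grows to the next multiple of size. Note that the formula adds a full extra block even when a dimension is already a multiple, matching the code.

import numpy as np

def pad_to_multiple(image, size=8):
    h, w = image.shape[:2]
    pad_h = (h // size + 1) * size - h
    pad_w = (w // size + 1) * size - w
    return np.pad(image, ((0, pad_h), (0, pad_w)), mode="symmetric")

print(pad_to_multiple(np.zeros((10, 13))).shape)  # (16, 16)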
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
def count_of_possible_combinations(snake_case__ : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case__ )
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
snake_case__ : int , snake_case__ : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
__snake_case = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case__ )
for item in array )
__snake_case = answer
return answer
__snake_case = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ )
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
__snake_case = [0] * (target + 1)
__snake_case = 1
for i in range(1 , target + 1 ):
for j in range(snake_case__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : Optional[int] = 5
UpperCAmelCase__ : Tuple = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 676 | 0 |
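A quick cross-check of the bottom-up recurrence above: dp[i] sums dp[i - a] over the items a, so it counts ordered compositions. For array [1, 2, 5] and target 5 the answer is 9 (1+1+1+1+1; 1+1+1+2 in four orders; 1+2+2 in three orders; 5).

def count_compositions(array, target):
    dp = [1] + [0] * target  # dp[0] = 1: the empty composition
    for i in range(1, target + 1):
        dp[i] = sum(dp[i - a] for a in array if i - a >= 0)
    return dp[target]

assert count_compositions([1, 2, 5], 5) == 9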
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def A ( snake_case__ : Tuple , snake_case__ : Tuple ) -> Dict:
'''simple docstring'''
__snake_case = Mock()
__snake_case = conn, Mock()
__snake_case = iter([1, None] )
__snake_case = lambda snake_case__ : next(snake_case__ )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=snake_case__ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 705 |
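For context, a hedged reconstruction of a send_file that would satisfy every mock assertion above; the host, port, and chunk size are assumptions and the real module may differ.

import socket

def send_file(filename="mytext.txt", testing=False):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("localhost", 12312))  # assumed address
    sock.listen(1)
    conn, _addr = sock.accept()
    conn.recv(1024)  # wait for the client's request
    with open(filename, "rb") as f:
        data = f.read(1024)
        while data:  # the mocked read() yields one truthy chunk, then None
            conn.send(data)
            data = f.read(1024)
    conn.close()
    sock.shutdown(socket.SHUT_RDWR)
    sock.close()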
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> List[str]:
__snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase_) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0)
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def A ( snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__snake_case = 'index.faiss'
__snake_case = f"mock://{index_name}"
index.save(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=lowercase_)
mocked_bulk.return_value([(True, None)] * 3)
index.add_documents(['foo', 'bar', 'foobar'])
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
| 676 | 0 |
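The FAISS round trip those fixtures exercise, reduced to the library's public API with the same inner-product metric:

import faiss
import numpy as np

index = faiss.IndexFlatIP(5)                        # flat inner-product index
index.add(np.eye(5, dtype=np.float32))              # five one-hot "documents"
scores, ids = index.search(np.ones((1, 5), dtype=np.float32), 3)
print(ids[0])  # the three nearest rows by dot product (all tied here)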
import re
def A ( snake_case__ : str ) -> bool:
'''simple docstring'''
__snake_case = re.compile(
r'^(?:0|94|\+94|0{2}94)' r'7(0|1|2|4|5|6|7|8)' r'(-| |)' r'\d{7}$' )
return bool(re.search(snake_case__ , snake_case__ ) )
if __name__ == "__main__":
UpperCAmelCase__ : int = "0094702343221"
print(is_sri_lankan_phone_number(phone))
| 706 |
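Illustrative matches against the pattern above (the numbers are made up):

import re

PATTERN = re.compile(r"^(?:0|94|\+94|0{2}94)7(0|1|2|4|5|6|7|8)(-| |)\d{7}$")

assert PATTERN.search("0094702343221")
assert PATTERN.search("+94713427896")
assert not PATTERN.search("0799209245")  # 9 is not a valid second digit after the 7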
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = args.log_outputs
__snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
__snake_case = load_metric('wer' )
__snake_case = load_metric('cer' )
# compute metrics
__snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] )
__snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
__snake_case = f"WER: {wer_result}\nCER: {cer_result}"
print(snake_case__ )
with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
f.write(snake_case__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__snake_case = f"log_{dataset_id}_predictions.txt"
__snake_case = f"log_{dataset_id}_targets.txt"
with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t:
# mapping function to write output
def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
p.write(f"{i}" + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f"{i}" + '\n' )
t.write(batch['target'] + '\n' )
result.map(snake_case__ , with_indices=snake_case__ )
def A ( snake_case__ : str ) -> str:
'''simple docstring'''
__snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__snake_case = re.sub(snake_case__ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
__snake_case = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
__snake_case = ' '.join(text.split(snake_case__ ) )
return text
def A ( snake_case__ : int ) -> Optional[int]:
'''simple docstring'''
# load dataset
__snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
__snake_case = AutoFeatureExtractor.from_pretrained(args.model_id )
__snake_case = feature_extractor.sampling_rate
# resample audio
__snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) )
# load eval pipeline
if args.device is None:
__snake_case = 0 if torch.cuda.is_available() else -1
__snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case__ : Optional[Any] ):
__snake_case = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__snake_case = prediction['text']
__snake_case = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
__snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case__ , snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
| 676 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = 42
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ ):
@register_to_config
def __init__( self , lowercase_ = 3 , lowercase_ = 3 , lowercase_ = ("DownEncoderBlock2D",) , lowercase_ = ("UpDecoderBlock2D",) , lowercase_ = (6_4,) , lowercase_ = 1 , lowercase_ = "silu" , lowercase_ = 3 , lowercase_ = 3_2 , lowercase_ = 2_5_6 , lowercase_ = 3_2 , lowercase_ = None , lowercase_ = 0.1_8215 , lowercase_ = "group" , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
__snake_case = Encoder(
in_channels=lowercase_ , out_channels=lowercase_ , down_block_types=lowercase_ , block_out_channels=lowercase_ , layers_per_block=lowercase_ , act_fn=lowercase_ , norm_num_groups=lowercase_ , double_z=lowercase_ , )
__snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels
__snake_case = nn.Convad(lowercase_ , lowercase_ , 1)
__snake_case = VectorQuantizer(lowercase_ , lowercase_ , beta=0.25 , remap=lowercase_ , sane_index_shape=lowercase_)
__snake_case = nn.Convad(lowercase_ , lowercase_ , 1)
# pass init params to Decoder
__snake_case = Decoder(
in_channels=lowercase_ , out_channels=lowercase_ , up_block_types=lowercase_ , block_out_channels=lowercase_ , layers_per_block=lowercase_ , act_fn=lowercase_ , norm_num_groups=lowercase_ , norm_type=lowercase_ , )
@apply_forward_hook
def _a ( self , lowercase_ , lowercase_ = True) -> VQEncoderOutput:
__snake_case = self.encoder(lowercase_)
__snake_case = self.quant_conv(lowercase_)
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase_)
@apply_forward_hook
def _a ( self , lowercase_ , lowercase_ = False , lowercase_ = True) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
__snake_case , __snake_case , __snake_case = self.quantize(lowercase_)
else:
__snake_case = h
__snake_case = self.post_quant_conv(lowercase_)
__snake_case = self.decoder(lowercase_ , quant if self.config.norm_type == 'spatial' else None)
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_)
def _a ( self , lowercase_ , lowercase_ = True) -> Union[DecoderOutput, torch.FloatTensor]:
__snake_case = sample
__snake_case = self.encode(lowercase_).latents
__snake_case = self.decode(lowercase_).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase_)
| 707 |
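A toy nearest-neighbour quantizer sketching the lookup that self.quantize performs above; the real VectorQuantizer additionally computes a commitment loss and a straight-through gradient, which are omitted here.

import torch

def quantize(z, codebook):
    # z: (n, d) latents, codebook: (k, d) embeddings ->
    # snap each latent to its nearest codebook entry by L2 distance.
    dists = torch.cdist(z, codebook)   # (n, k) pairwise distances
    indices = dists.argmin(dim=1)      # (n,) nearest code per latent
    return codebook[indices], indices

z_q, idx = quantize(torch.randn(10, 4), torch.randn(256, 4))
print(z_q.shape, idx.shape)  # torch.Size([10, 4]) torch.Size([10])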
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per node; torch.distributed.run launches one process per GPU
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def A ( *snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(snake_case__ , 'r' ) as fh:
fcntl.flock(snake_case__ , fcntl.LOCK_EX )
try:
print(*snake_case__ )
finally:
fcntl.flock(snake_case__ , fcntl.LOCK_UN )
UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
UpperCAmelCase__ : Any = torch.device("cuda", local_rank)
UpperCAmelCase__ : Union[str, Any] = socket.gethostname()
UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
UpperCAmelCase__ : Optional[int] = dist.get_rank()
UpperCAmelCase__ : List[str] = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 676 | 0 |
import string
def A ( snake_case__ : str ) -> None:
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
__snake_case = ''
for symbol in message:
if symbol in string.ascii_uppercase:
__snake_case = string.ascii_uppercase.find(snake_case__ )
__snake_case = num - key
if num < 0:
__snake_case = num + len(string.ascii_uppercase )
__snake_case = translated + string.ascii_uppercase[num]
else:
__snake_case = translated + symbol
print(f"Decryption using Key #{key}: {translated}" )
def A ( ) -> None:
'''simple docstring'''
__snake_case = input('Encrypted message: ' )
__snake_case = message.upper()
decrypt(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 708 |
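The same brute force as a pure function, convenient for testing; a condensed sketch of the loop above:

def brute_force_decrypt(message):
    # Try every Caesar shift and return all 26 candidate plaintexts.
    upper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return [
        "".join(upper[(upper.find(c) - key) % 26] if c in upper else c for c in message)
        for key in range(26)
    ]

print(brute_force_decrypt("KHOOR")[3])  # HELLO (shift of 3)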
from datetime import datetime
import requests
def A ( snake_case__ : str ) -> bytes:
'''simple docstring'''
__snake_case = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
__snake_case = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(snake_case__ ).content
if __name__ == "__main__":
UpperCAmelCase__ : Dict = input("Enter Video/IGTV url: ").strip()
UpperCAmelCase__ : Optional[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 0 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class __lowercase :
def __init__( self) -> int:
__snake_case = {}
def _a ( self , lowercase_ , lowercase_ , lowercase_=1) -> str:
if self.graph.get(lowercase_):
if self.graph[u].count([w, v]) == 0:
self.graph[u].append([w, v])
else:
__snake_case = [[w, v]]
if not self.graph.get(lowercase_):
__snake_case = []
def _a ( self) -> int:
return list(self.graph)
def _a ( self , lowercase_ , lowercase_) -> Optional[Any]:
if self.graph.get(lowercase_):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase_)
def _a ( self , lowercase_=-2 , lowercase_=-1) -> Optional[int]:
if s == d:
return []
__snake_case = []
__snake_case = []
if s == -2:
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
if node[1] == d:
visited.append(lowercase_)
return visited
else:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return visited
def _a ( self , lowercase_=-1) -> List[Any]:
if c == -1:
__snake_case = floor(random() * 1_0_0_0_0) + 1_0
for i in range(lowercase_):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2) + 1):
__snake_case = floor(random() * c) + 1
if n != i:
self.add_pair(lowercase_ , lowercase_ , 1)
def _a ( self , lowercase_=-2) -> List[str]:
__snake_case = deque()
__snake_case = []
if s == -2:
__snake_case = list(self.graph)[0]
d.append(lowercase_)
visited.append(lowercase_)
while d:
__snake_case = d.popleft()
if len(self.graph[s]) != 0:
for node in self.graph[s]:
if visited.count(node[1]) < 1:
d.append(node[1])
visited.append(node[1])
return visited
def _a ( self , lowercase_) -> Any:
__snake_case = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _a ( self , lowercase_) -> Dict:
return len(self.graph[u])
def _a ( self , lowercase_=-2) -> Union[str, Any]:
__snake_case = []
__snake_case = []
if s == -2:
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = s
__snake_case = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop())
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return sorted_nodes
def _a ( self) -> Optional[Any]:
__snake_case = []
__snake_case = []
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = -2
__snake_case = []
__snake_case = s
__snake_case = False
__snake_case = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
__snake_case = len(lowercase_) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__snake_case = True
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = False
indirect_parents.append(lowercase_)
__snake_case = s
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return list(lowercase_)
def _a ( self) -> List[Any]:
__snake_case = []
__snake_case = []
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = -2
__snake_case = []
__snake_case = s
__snake_case = False
__snake_case = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
__snake_case = len(lowercase_) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__snake_case = True
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = False
indirect_parents.append(lowercase_)
__snake_case = s
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return False
def _a ( self , lowercase_=-2 , lowercase_=-1) -> List[Any]:
__snake_case = time()
self.dfs(lowercase_ , lowercase_)
__snake_case = time()
return end - begin
def _a ( self , lowercase_=-2) -> Dict:
__snake_case = time()
self.bfs(lowercase_)
__snake_case = time()
return end - begin
class __lowercase :
def __init__( self) -> str:
__snake_case = {}
def _a ( self , lowercase_ , lowercase_ , lowercase_=1) -> str:
# check if the u exists
if self.graph.get(lowercase_):
            # if there already is an edge
if self.graph[u].count([w, v]) == 0:
self.graph[u].append([w, v])
else:
# if u does not exist
__snake_case = [[w, v]]
# add the other way
if self.graph.get(lowercase_):
            # if there already is an edge
if self.graph[v].count([w, u]) == 0:
self.graph[v].append([w, u])
else:
            # if v does not exist
__snake_case = [[w, u]]
def _a ( self , lowercase_ , lowercase_) -> Union[str, Any]:
if self.graph.get(lowercase_):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase_)
# the other way round
if self.graph.get(lowercase_):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowercase_)
def _a ( self , lowercase_=-2 , lowercase_=-1) -> Tuple:
if s == d:
return []
__snake_case = []
__snake_case = []
if s == -2:
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
if node[1] == d:
visited.append(lowercase_)
return visited
else:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return visited
def _a ( self , lowercase_=-1) -> Dict:
if c == -1:
__snake_case = floor(random() * 1_0_0_0_0) + 1_0
for i in range(lowercase_):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2) + 1):
__snake_case = floor(random() * c) + 1
if n != i:
self.add_pair(lowercase_ , lowercase_ , 1)
def _a ( self , lowercase_=-2) -> int:
__snake_case = deque()
__snake_case = []
if s == -2:
__snake_case = list(self.graph)[0]
d.append(lowercase_)
visited.append(lowercase_)
while d:
__snake_case = d.popleft()
if len(self.graph[s]) != 0:
for node in self.graph[s]:
if visited.count(node[1]) < 1:
d.append(node[1])
visited.append(node[1])
return visited
def _a ( self , lowercase_) -> Union[str, Any]:
return len(self.graph[u])
def _a ( self) -> List[str]:
__snake_case = []
__snake_case = []
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = -2
__snake_case = []
__snake_case = s
__snake_case = False
__snake_case = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
__snake_case = len(lowercase_) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__snake_case = True
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = False
indirect_parents.append(lowercase_)
__snake_case = s
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return list(lowercase_)
def _a ( self) -> int:
__snake_case = []
__snake_case = []
__snake_case = list(self.graph)[0]
stack.append(lowercase_)
visited.append(lowercase_)
__snake_case = -2
__snake_case = []
__snake_case = s
__snake_case = False
__snake_case = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__snake_case = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
__snake_case = len(lowercase_) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__snake_case = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__snake_case = True
if len(lowercase_) != 0:
__snake_case = stack[len(lowercase_) - 1]
else:
__snake_case = False
indirect_parents.append(lowercase_)
__snake_case = s
__snake_case = ss
            # check if we have reached the starting point
if len(lowercase_) == 0:
return False
def _a ( self) -> Tuple:
return list(self.graph)
def _a ( self , lowercase_=-2 , lowercase_=-1) -> int:
__snake_case = time()
self.dfs(lowercase_ , lowercase_)
__snake_case = time()
return end - begin
def _a ( self , lowercase_=-2) -> str:
__snake_case = time()
self.bfs(lowercase_)
__snake_case = time()
return end - begin
| 709 |
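A compact, readable usage sketch of the adjacency-list convention the two classes above share (graph[u] holds [weight, v] pairs), here driving a plain BFS:

from collections import deque

graph = {0: [[1, 1], [1, 2]], 1: [[1, 3]], 2: [[1, 3]], 3: []}

def bfs(start):
    order, seen, queue = [], {start}, deque([start])
    while queue:
        u = queue.popleft()
        order.append(u)
        for _weight, v in graph[u]:
            if v not in seen:
                seen.add(v)
                queue.append(v)
    return order

print(bfs(0))  # [0, 1, 2, 3]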
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowercase :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def _a ( self) -> Union[str, Any]:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = OpenLlamaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and the attention mask
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
def _a ( self) -> Optional[Any]:
__snake_case = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = config_and_inputs
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> Tuple:
__snake_case = OpenLlamaModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self) -> int:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'single_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'multi_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _a ( self) -> List[Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def _a ( self , lowercase_) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = ids_tensor([1, 1_0] , config.vocab_size)
__snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = OpenLlamaModel(lowercase_)
original_model.to(lowercase_)
original_model.eval()
__snake_case = original_model(lowercase_).last_hidden_state
__snake_case = original_model(lowercase_).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = {'type': scaling_type, 'factor': 10.0}
__snake_case = OpenLlamaModel(lowercase_)
scaled_model.to(lowercase_)
scaled_model.eval()
__snake_case = scaled_model(lowercase_).last_hidden_state
__snake_case = scaled_model(lowercase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
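# A minimal sketch (illustrative, not the model's actual implementation) of why
# the test above expects the 'dynamic' branch to match the original model on a
# short input while 'linear' does not: linear RoPE scaling divides positions by
# `factor` unconditionally, whereas dynamic scaling only rescales once the
# sequence exceeds the original maximum length. All names below are assumptions.
import torch

def rotary_angles(seq_len, dim, base=10000.0, scaling_type=None, factor=1.0, max_pos=2048):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    positions = torch.arange(seq_len).float()
    if scaling_type == "linear":
        positions = positions / factor  # applied even to short inputs
    elif scaling_type == "dynamic" and seq_len > max_pos:
        positions = positions * max_pos / seq_len  # simplified stand-in for dynamic NTK rescaling
    return torch.outer(positions, inv_freq)

short = rotary_angles(10, 64)
assert torch.allclose(short, rotary_angles(10, 64, scaling_type="dynamic", factor=10.0))
assert not torch.allclose(short, rotary_angles(10, 64, scaling_type="linear", factor=10.0))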
| 676 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''imagegpt'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , lowercase_=5_1_2 + 1 , lowercase_=3_2 * 3_2 , lowercase_=5_1_2 , lowercase_=2_4 , lowercase_=8 , lowercase_=None , lowercase_="quick_gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1e-5 , lowercase_=0.02 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=False , lowercase_=False , **lowercase_ , ) -> Union[str, Any]:
__snake_case = vocab_size
__snake_case = n_positions
__snake_case = n_embd
__snake_case = n_layer
__snake_case = n_head
__snake_case = n_inner
__snake_case = activation_function
__snake_case = resid_pdrop
__snake_case = embd_pdrop
__snake_case = attn_pdrop
__snake_case = layer_norm_epsilon
__snake_case = initializer_range
__snake_case = scale_attn_weights
__snake_case = use_cache
__snake_case = scale_attn_by_inverse_layer_idx
__snake_case = reorder_and_upcast_attn
__snake_case = tie_word_embeddings
super().__init__(tie_word_embeddings=lowercase_ , **lowercase_)
class __lowercase ( lowerCamelCase__ ):
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
])
def _a ( self , lowercase_ , lowercase_ = 1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , lowercase_ = 3 , lowercase_ = 3_2 , lowercase_ = 3_2 , ) -> Mapping[str, Any]:
__snake_case = self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_)
__snake_case = dict(preprocessor(images=lowercase_ , return_tensors=lowercase_))
return inputs
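# A minimal usage sketch for the configuration defined above, assuming it
# corresponds to the upstream ImageGPTConfig (the class name is obfuscated to
# `__lowercase` in this snippet):
from transformers import ImageGPTConfig

config = ImageGPTConfig()                     # defaults: vocab of 512 + 1, 32 * 32 positions
assert config.hidden_size == config.n_embd    # attribute_map aliases hidden_size -> n_embd
small = ImageGPTConfig(n_layer=12, n_head=4)  # an illustrative smaller variant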
| 710 |
def A ( snake_case__ : int ) -> bool:
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
__snake_case = f"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if number < 0:
return False
__snake_case = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
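# The function above tests for automorphic numbers: n qualifies when n**2 ends
# with the digits of n, compared one digit at a time from the right. A couple
# of concrete checks (using the obfuscated name `A` from this snippet):
assert A(25)     # 25**2 = 625 ends in 25
assert A(76)     # 76**2 = 5776 ends in 76
assert not A(7)  # 7**2 = 49 does not end in 7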
| 676 | 0 |
from __future__ import annotations
def A ( snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : list[list[str]] , snake_case__ : int , ) -> None:
'''simple docstring'''
__snake_case = len(snake_case__ )
    # If row equals the size of the board, there is a queen in every row of the
    # current board (possible_board), i.e. we have found a complete solution
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column of the row to find every valid placement
for col in range(snake_case__ ):
        # We apply what we learned previously. First we check that the current
        # column is not already present in the board (possible_board); if it is,
        # placing a queen there would collide vertically with an existing one.
        # Then we apply the two formulas we learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
        # And we verify that the results of these two formulas do not already exist
        # in their respective sets (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue to the
        # next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If all checks pass we recurse, calling the dfs function with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , snake_case__ , snake_case__ , )
def A ( snake_case__ : int ) -> None:
'''simple docstring'''
__snake_case = []
depth_first_search([] , [] , [] , snake_case__ , snake_case__ )
# Print all the boards
for board in boards:
for column in board:
print(snake_case__ )
print('' )
print(len(snake_case__ ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
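# A concrete trace of the two diagonal formulas used in the search above:
# queens at (1, 3) and (3, 1) share the "\" diagonal because 1 + 3 == 3 + 1,
# and queens at (0, 0) and (2, 2) share the "/" diagonal because 0 - 0 == 2 - 2.
# The lists diagonal_right_collisions / diagonal_left_collisions simply record
# these row - col and row + col values for every queen already placed.
assert 1 + 3 == 3 + 1 and 0 - 0 == 2 - 2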
| 711 |
import numpy as np
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
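# Quick numeric sanity check for the two helpers above (using the upstream
# names `sigmoid` and `sigmoid_linear_unit`, since the obfuscated snippet
# renames both to `A`): the SiLU / swish activation is x * sigmoid(x).
x = np.array([-1.0, 0.0, 1.0])
# sigmoid(x)             ~ [0.2689, 0.5000, 0.7311]
# sigmoid_linear_unit(x) ~ [-0.2689, 0.0000, 0.7311]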
| 676 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
UpperCAmelCase__ : Optional[Any] = "Create a default config file for Accelerate with only a few flags set."
def A ( snake_case__ : Dict="no" , snake_case__ : str = default_json_config_file , snake_case__ : bool = False ) -> Optional[int]:
'''simple docstring'''
__snake_case = Path(snake_case__ )
path.parent.mkdir(parents=snake_case__ , exist_ok=snake_case__ )
if path.exists():
print(
f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
return False
__snake_case = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
__snake_case = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
__snake_case = torch.cuda.device_count()
__snake_case = num_gpus
__snake_case = False
if num_gpus > 1:
__snake_case = 'MULTI_GPU'
else:
__snake_case = 'NO'
elif is_xpu_available() and use_xpu:
__snake_case = torch.xpu.device_count()
__snake_case = num_xpus
__snake_case = False
if num_xpus > 1:
__snake_case = 'MULTI_XPU'
else:
__snake_case = 'NO'
elif is_npu_available():
__snake_case = torch.npu.device_count()
__snake_case = num_npus
__snake_case = False
if num_npus > 1:
__snake_case = 'MULTI_NPU'
else:
__snake_case = 'NO'
else:
__snake_case = 0
__snake_case = True
__snake_case = 1
__snake_case = 'NO'
__snake_case = ClusterConfig(**snake_case__ )
config.to_json_file(snake_case__ )
return path
def A ( snake_case__ : Dict , snake_case__ : Dict ) -> Any:
'''simple docstring'''
__snake_case = parser.add_parser('default' , parents=snake_case__ , help=snake_case__ , formatter_class=snake_case__ )
parser.add_argument(
'--config_file' , default=snake_case__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=snake_case__ , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=snake_case__ )
return parser
def A ( snake_case__ : str ) -> str:
'''simple docstring'''
__snake_case = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(f"accelerate configuration saved at {config_file}" )
| 712 |
def A ( snake_case__ : int ) -> bool:
'''simple docstring'''
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
__snake_case = 4
__snake_case = (1 << p) - 1
for _ in range(p - 2 ):
__snake_case = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
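# The test above decides primality of the Mersenne number 2**p - 1 for a prime
# exponent p: s starts at 4 and is repeatedly squared-minus-2 modulo 2**p - 1
# for p - 2 rounds, and the Mersenne number is prime iff s ends at 0. The two
# calls above therefore print True for p = 7 (2**7 - 1 = 127, prime) and False
# for p = 11 (2**11 - 1 = 2047 = 23 * 89, composite).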
| 676 | 0 |
from collections.abc import Sequence
def A ( snake_case__ : Sequence[float] , snake_case__ : float ) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(snake_case__ ) )
def A ( snake_case__ : Sequence[float] , snake_case__ : float ) -> float:
'''simple docstring'''
__snake_case = 0.0
for coeff in reversed(snake_case__ ):
__snake_case = result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase__ : Dict = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
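# Worked example for the evaluation above: poly = (0.0, 0.0, 5.0, 9.3, 7.0)
# stores coefficients in ascending order of power, i.e. 5x**2 + 9.3x**3 + 7x**4,
# so at x = 10.0 both functions print 5*100 + 9.3*1000 + 7*10000 = 79800.0.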
| 713 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : Optional[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
UpperCAmelCase__ : List[str] = True
from torch.cuda.amp import autocast
UpperCAmelCase__ : Optional[int] = logging.getLogger(__name__)
def A ( snake_case__ : Dict=None , snake_case__ : Optional[Any]=None ) -> Optional[Any]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=snake_case__ )
@dataclass
class __lowercase :
__UpperCAmelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__UpperCAmelCase = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
__UpperCAmelCase = field(
default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
__UpperCAmelCase = field(
default=0.1 , metadata={
        '''help''': '''The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'''
} , )
__UpperCAmelCase = field(
        default=0.1 , metadata={'''help''': '''The dropout probability for all 1D convolutional layers in the feature extractor.'''} , )
__UpperCAmelCase = field(
default=0.05 , metadata={
'''help''': (
            '''Probability of each feature vector along the time axis to be chosen as the start of the vector'''
            '''span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
            '''vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment`` is ``True``.'''
)
} , )
__UpperCAmelCase = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class __lowercase :
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__UpperCAmelCase = field(
default='''train+validation''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train+validation\''''
} , )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of validation examples to this '''
'''value if set.'''
)
} , )
__UpperCAmelCase = list_field(
default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class __lowercase :
__UpperCAmelCase = 42
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , lowercase_) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
__snake_case = [{'input_values': feature['input_values']} for feature in features]
__snake_case = [{'input_ids': feature['labels']} for feature in features]
__snake_case = self.processor.pad(
lowercase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
__snake_case = self.processor.pad(
labels=lowercase_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
__snake_case = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1) , -1_0_0)
__snake_case = labels
return batch
class __lowercase ( lowerCamelCase__ ):
def _a ( self , lowercase_ , lowercase_) -> torch.Tensor:
model.train()
__snake_case = self._prepare_inputs(lowercase_)
if self.use_amp:
with autocast():
__snake_case = self.compute_loss(lowercase_ , lowercase_)
else:
__snake_case = self.compute_loss(lowercase_ , lowercase_)
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__snake_case = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__snake_case = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(F"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
__snake_case = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowercase_).backward()
elif self.use_apex:
with amp.scale_loss(lowercase_ , self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowercase_)
else:
loss.backward()
return loss.detach()
def A ( ) -> str:
'''simple docstring'''
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__snake_case = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , snake_case__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__snake_case = datasets.load_dataset(
'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
__snake_case = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
# Create and save tokenizer
__snake_case = f"[{''.join(data_args.chars_to_ignore )}]"
def remove_special_characters(snake_case__ : str ):
__snake_case = re.sub(snake_case__ , '' , batch['sentence'] ).lower() + ' '
return batch
__snake_case = train_dataset.map(snake_case__ , remove_columns=['sentence'] )
__snake_case = eval_dataset.map(snake_case__ , remove_columns=['sentence'] )
def extract_all_chars(snake_case__ : Optional[Any] ):
__snake_case = ' '.join(batch['text'] )
__snake_case = list(set(snake_case__ ) )
return {"vocab": [vocab], "all_text": [all_text]}
__snake_case = train_dataset.map(
snake_case__ , batched=snake_case__ , batch_size=-1 , keep_in_memory=snake_case__ , remove_columns=train_dataset.column_names , )
__snake_case = train_dataset.map(
snake_case__ , batched=snake_case__ , batch_size=-1 , keep_in_memory=snake_case__ , remove_columns=eval_dataset.column_names , )
__snake_case = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
__snake_case = {v: k for k, v in enumerate(snake_case__ )}
__snake_case = vocab_dict[' ']
del vocab_dict[" "]
__snake_case = len(snake_case__ )
__snake_case = len(snake_case__ )
with open('vocab.json' , 'w' ) as vocab_file:
json.dump(snake_case__ , snake_case__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case = WavaVecaCTCTokenizer(
'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
__snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0.0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ )
__snake_case = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
__snake_case = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__snake_case = min(len(snake_case__ ) , data_args.max_train_samples )
__snake_case = train_dataset.select(range(snake_case__ ) )
if data_args.max_val_samples is not None:
__snake_case = eval_dataset.select(range(data_args.max_val_samples ) )
__snake_case = torchaudio.transforms.Resample(4_8000 , 1_6000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(snake_case__ : List[str] ):
__snake_case , __snake_case = torchaudio.load(batch['path'] )
__snake_case = resampler(snake_case__ ).squeeze().numpy()
__snake_case = 1_6000
__snake_case = batch['text']
return batch
__snake_case = train_dataset.map(
snake_case__ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__snake_case = eval_dataset.map(
snake_case__ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(snake_case__ : Tuple ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
__snake_case = processor(
audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
batch.update(snake_case__ )
return batch
__snake_case = train_dataset.map(
snake_case__ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , )
__snake_case = eval_dataset.map(
snake_case__ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , )
# Metric
__snake_case = datasets.load_metric('wer' )
def compute_metrics(snake_case__ : int ):
__snake_case = pred.predictions
__snake_case = np.argmax(snake_case__ , axis=-1 )
__snake_case = processor.tokenizer.pad_token_id
__snake_case = processor.batch_decode(snake_case__ )
# we do not want to group tokens when computing the metrics
__snake_case = processor.batch_decode(pred.label_ids , group_tokens=snake_case__ )
__snake_case = wer_metric.compute(predictions=snake_case__ , references=snake_case__ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__snake_case = DataCollatorCTCWithPadding(processor=snake_case__ , padding=snake_case__ )
# Initialize our Trainer
__snake_case = CTCTrainer(
model=snake_case__ , data_collator=snake_case__ , args=snake_case__ , compute_metrics=snake_case__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__snake_case = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__snake_case = model_args.model_name_or_path
else:
__snake_case = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__snake_case = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
__snake_case = train_result.metrics
__snake_case = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case__ )
)
__snake_case = min(snake_case__ , len(snake_case__ ) )
trainer.log_metrics('train' , snake_case__ )
trainer.save_metrics('train' , snake_case__ )
trainer.save_state()
# Evaluation
__snake_case = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case = trainer.evaluate()
__snake_case = data_args.max_val_samples if data_args.max_val_samples is not None else len(snake_case__ )
__snake_case = min(snake_case__ , len(snake_case__ ) )
trainer.log_metrics('eval' , snake_case__ )
trainer.save_metrics('eval' , snake_case__ )
return results
if __name__ == "__main__":
main()
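# A minimal sketch of the label-padding trick used in the data collator above:
# CTC loss must ignore padded label positions, so padded ids are replaced with
# -100 wherever the tokenizer's attention mask is 0. The tensors are toy data.
import torch

padded_ids = torch.tensor([[5, 9, 2, 0, 0]])
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
labels = padded_ids.masked_fill(attention_mask.ne(1), -100)
assert labels.tolist() == [[5, 9, 2, -100, -100]]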
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_) -> List[str]:
super().__init__()
# make sure scheduler can always be converted to DDIM
__snake_case = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=lowercase_ , scheduler=lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 5_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample Gaussian noise to begin the loop
if isinstance(self.unet.config.sample_size , lowercase_):
__snake_case = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
__snake_case = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowercase_ , lowercase_) and len(lowercase_) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(lowercase_)}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators.")
__snake_case = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=self.unet.dtype)
# set step values
self.scheduler.set_timesteps(lowercase_)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
__snake_case = self.unet(lowercase_ , lowercase_).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the paper and should be in [0, 1]
            # do x_t -> x_t-1
__snake_case = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_).prev_sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_)
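# A minimal usage sketch, assuming the pipeline above mirrors diffusers'
# DDIMPipeline (the class name is obfuscated to `__lowercase` in this snippet):
# load a trained UNet + scheduler pair and sample deterministically with eta=0.
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")  # illustrative checkpoint
image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]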
| 715 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def A ( snake_case__ : List[Any] ) -> Any:
'''simple docstring'''
__snake_case = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__snake_case = 4
__snake_case = 48
__snake_case = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = [6, 6, 6, 6]
__snake_case = 60
__snake_case = [6, 6, 6, 6]
__snake_case = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = 4
__snake_case = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__snake_case = 1
__snake_case = 1
__snake_case = 126
__snake_case = 7
__snake_case = 255.0
__snake_case = ''
return config
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
__snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__snake_case = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__snake_case = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "conv_first" in name:
__snake_case = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__snake_case = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__snake_case = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__snake_case = name.replace('upsample.2' , 'upsample.convolution_1' )
__snake_case = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__snake_case = 'swin2sr.' + name
return name
def A ( snake_case__ : str , snake_case__ : List[Any] ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[4] )
__snake_case = config.embed_dim
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
pass
else:
__snake_case = val
return orig_state_dict
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int ) -> Tuple:
'''simple docstring'''
__snake_case = get_config(snake_case__ )
__snake_case = SwinaSRForImageSuperResolution(snake_case__ )
model.eval()
__snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )
__snake_case = convert_state_dict(snake_case__ , snake_case__ )
__snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
__snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
__snake_case = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__snake_case = 126 if 'Jpeg' in checkpoint_url else 256
__snake_case = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case = transforms(snake_case__ ).unsqueeze(0 )
if config.num_channels == 1:
__snake_case = pixel_values[:, 0, :, :].unsqueeze(1 )
__snake_case = model(snake_case__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 )
print('Looks ok!' )
__snake_case = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__snake_case = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
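# A minimal sketch of the fused-qkv split performed in convert_state_dict above:
# a (3 * dim, dim) projection weight is cut row-wise into query / key / value.
import torch

dim = 4
fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)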
| 676 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __lowercase ( lowerCamelCase__ ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = None
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCAmelCase__ = 2
@register_to_config
def __init__( self , lowercase_ = 0.02 , lowercase_ = 1_0_0 , lowercase_ = 1.007 , lowercase_ = 8_0 , lowercase_ = 0.05 , lowercase_ = 5_0 , ) -> Any:
# standard deviation of the initial noise distribution
__snake_case = sigma_max
# setable values
__snake_case = None
__snake_case = None
__snake_case = None # sigma(t_i)
def _a ( self , lowercase_ , lowercase_ = None) -> torch.FloatTensor:
return sample
def _a ( self , lowercase_ , lowercase_ = None) -> Tuple:
__snake_case = num_inference_steps
__snake_case = np.arange(0 , self.num_inference_steps)[::-1].copy()
__snake_case = torch.from_numpy(lowercase_).to(lowercase_)
__snake_case = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
__snake_case = torch.tensor(lowercase_ , dtype=torch.floataa , device=lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ = None) -> Tuple[torch.FloatTensor, float]:
if self.config.s_min <= sigma <= self.config.s_max:
__snake_case = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1)
else:
__snake_case = 0
# sample eps ~ N(0, S_noise^2 * I)
__snake_case = self.config.s_noise * randn_tensor(sample.shape , generator=lowercase_).to(sample.device)
__snake_case = sigma + gamma * sigma
__snake_case = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ) -> Union[KarrasVeOutput, Tuple]:
__snake_case = sample_hat + sigma_hat * model_output
__snake_case = (sample_hat - pred_original_sample) / sigma_hat
__snake_case = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowercase_ , derivative=lowercase_ , pred_original_sample=lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ) -> Union[KarrasVeOutput, Tuple]:
__snake_case = sample_prev + sigma_prev * model_output
__snake_case = (sample_prev - pred_original_sample) / sigma_prev
__snake_case = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowercase_ , derivative=lowercase_ , pred_original_sample=lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Union[str, Any]:
raise NotImplementedError()
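# A minimal numpy sketch of the geometric sigma interpolation computed in
# set_timesteps above (values are squared, exactly as in the code): t = 0
# yields sigma_max**2 and t = 1 yields sigma_min**2.
import numpy as np

sigma_min, sigma_max = 0.02, 80.0
t = np.linspace(0.0, 1.0, 5)
sigmas = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** t
assert np.isclose(sigmas[0], sigma_max**2) and np.isclose(sigmas[-1], sigma_min**2)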
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
def A ( snake_case__ : Union[tf.Tensor, np.ndarray] ) -> List[int]:
'''simple docstring'''
if isinstance(snake_case__ , np.ndarray ):
return list(tensor.shape )
__snake_case = tf.shape(snake_case__ )
if tensor.shape == tf.TensorShape(snake_case__ ):
return dynamic
__snake_case = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(snake_case__ )]
def A ( snake_case__ : tf.Tensor , snake_case__ : Optional[int] = None , snake_case__ : Optional[str] = None ) -> tf.Tensor:
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1e-9 , axis=snake_case__ , name=snake_case__ )
def A ( snake_case__ : Any , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Tuple=1e-5 , snake_case__ : List[str]=-1 ) -> List[Any]:
'''simple docstring'''
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(snake_case__ , snake_case__ ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
__snake_case , __snake_case = tf.nn.moments(snake_case__ , axes=[axis] , keepdims=snake_case__ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
__snake_case = [1] * inputs.shape.rank
__snake_case = shape_list(snake_case__ )[axis]
__snake_case = tf.reshape(snake_case__ , snake_case__ )
__snake_case = tf.reshape(snake_case__ , snake_case__ )
# Compute layer normalization using the batch_normalization
# function.
__snake_case = tf.nn.batch_normalization(
snake_case__ , snake_case__ , snake_case__ , offset=snake_case__ , scale=snake_case__ , variance_epsilon=snake_case__ , )
return outputs
def A ( snake_case__ : Any , snake_case__ : str=0 , snake_case__ : str=-1 ) -> Optional[int]:
'''simple docstring'''
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
__snake_case = tf.shape(snake_case__ )
__snake_case = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
__snake_case = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(snake_case__ , snake_case__ )
def A ( snake_case__ : tf.Tensor ) -> tf.Tensor:
'''simple docstring'''
if not isinstance(snake_case__ , tf.Tensor ):
__snake_case = tf.convert_to_tensor(snake_case__ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
__snake_case = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
__snake_case = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
__snake_case = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def A ( snake_case__ : tf.Tensor , snake_case__ : int , snake_case__ : str = "input_ids" ) -> None:
'''simple docstring'''
tf.debugging.assert_less(
snake_case__ , tf.cast(snake_case__ , dtype=tensor.dtype ) , message=(
f"The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case__ )}) must be smaller than the embedding "
f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
) , )
def A ( snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : str ) -> Tuple:
'''simple docstring'''
__snake_case = 6_4512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
__snake_case = [x for x in data if len(snake_case__ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
f"bytes: {bad_attributes}" )
__snake_case = np.asarray(snake_case__ )
__snake_case = 1
__snake_case = np.array_split(snake_case__ , snake_case__ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
__snake_case = np.array_split(snake_case__ , snake_case__ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(snake_case__ ):
__snake_case = chunk_data
else:
__snake_case = data
def A ( snake_case__ : Optional[int] , snake_case__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
if name in group.attrs:
__snake_case = [n.decode('utf8' ) if hasattr(snake_case__ , 'decode' ) else n for n in group.attrs[name]]
else:
__snake_case = []
__snake_case = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(snake_case__ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def A ( snake_case__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
def _expand_single_ad_tensor(snake_case__ : List[Any] ):
if isinstance(snake_case__ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(snake_case__ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , snake_case__ )
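# A quick illustration of shape_list above (obfuscated to `A` in this snippet):
# static dimensions come back as plain Python ints, while dimensions only known
# at run time come back as scalar tensors taken from tf.shape, which is what
# makes the helper safe inside tf.function graphs.
import tensorflow as tf

x = tf.zeros((2, 3))
# shape_list(x) == [2, 3]          # fully static shape: plain ints
# for an unknown batch dimension, entry 0 would instead be tf.shape(x)[0]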
| 717 |
from __future__ import annotations
class __lowercase :
def __init__( self , lowercase_) -> None:
__snake_case = data
__snake_case = None
__snake_case = None
def A ( snake_case__ : Node | None ) -> None: # in-order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def A ( snake_case__ : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def A ( snake_case__ : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def A ( ) -> None: # Main function for testing.
'''simple docstring'''
__snake_case = Node(1 )
__snake_case = Node(2 )
__snake_case = Node(3 )
__snake_case = Node(4 )
__snake_case = Node(5 )
__snake_case = Node(6 )
__snake_case = Node(7 )
__snake_case = Node(8 )
__snake_case = Node(9 )
print(is_full_binary_tree(snake_case__ ) )
print(depth_of_tree(snake_case__ ) )
print('Tree is: ' )
display(snake_case__ )
if __name__ == "__main__":
main()
| 676 | 0 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
UpperCAmelCase__ : Any = "http://www.mocksite.com/file1.txt"
UpperCAmelCase__ : Dict = "\"text\": [\"foo\", \"foo\"]"
UpperCAmelCase__ : Union[str, Any] = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class __lowercase :
__UpperCAmelCase = 200
__UpperCAmelCase = {'''Content-Length''': '''100'''}
__UpperCAmelCase = {}
def _a ( self , **lowercase_) -> str:
return [bytes(lowercase_ , 'utf-8')]
def A ( *snake_case__ : int , **snake_case__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def A ( snake_case__ : Dict , snake_case__ : str , snake_case__ : int ) -> List[Any]:
'''simple docstring'''
import requests
monkeypatch.setattr(snake_case__ , 'request' , snake_case__ )
__snake_case = URL
if issubclass(snake_case__ , snake_case__ ):
__snake_case = url
elif issubclass(snake_case__ , snake_case__ ):
__snake_case = [url]
elif issubclass(snake_case__ , snake_case__ ):
__snake_case = {'train': url}
__snake_case = 'dummy'
__snake_case = 'downloads'
__snake_case = tmp_path
__snake_case = DownloadConfig(
cache_dir=os.path.join(snake_case__ , snake_case__ ) , use_etag=snake_case__ , )
__snake_case = DownloadManager(dataset_name=snake_case__ , download_config=snake_case__ )
__snake_case = dl_manager.download(snake_case__ )
__snake_case = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(snake_case__ , snake_case__ ):
__snake_case = [downloaded_paths]
__snake_case = [urls]
elif isinstance(snake_case__ , snake_case__ ):
assert "train" in downloaded_paths.keys()
__snake_case = downloaded_paths.values()
__snake_case = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(snake_case__ , snake_case__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case = Path(snake_case__ )
__snake_case = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case = downloaded_path.read_text()
assert content == CONTENT
__snake_case = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
__snake_case = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def test_download_manager_extract(paths_type , xz_file , text_file):
    '''simple docstring'''
    filename = str(xz_file)
    if issubclass(paths_type , str):
        paths = filename
    elif issubclass(paths_type , list):
        paths = [filename]
    elif issubclass(paths_type , dict):
        paths = {'train': filename}
    dataset_name = 'dummy'
    cache_dir = xz_file.parent
    extracted_subdir = 'extracted'
    download_config = DownloadConfig(
        cache_dir=cache_dir , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths , str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths , dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths , input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path , etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path , file):
    '''simple docstring'''
    assert path.endswith('.jsonl')
    for num_items, line in enumerate(file , start=1):
        item = json.loads(line.decode('utf-8'))
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def test_iter_archive_path(archive_jsonl , request):
    '''simple docstring'''
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path) , start=1):
        _test_jsonl(path , file)
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def test_iter_archive_file(archive_nested_jsonl , request):
    '''simple docstring'''
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path) , start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file) , start=1):
            _test_jsonl(subpath , subfile)
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir):  # fixture name assumed; any dir with test.txt/train.txt works
    '''simple docstring'''
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir) , start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''table-transformer'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) -> None:
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config , dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs)
@property
def _a ( self) -> int:
return self.encoder_attention_heads
@property
def _a ( self) -> int:
return self.d_model
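# Upstream this class is TableTransformerConfig; a typical use (illustrative):
#   config = TableTransformerConfig()
#   config.hidden_size == config.d_model  # resolved through attribute_map above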
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def _a ( self) -> float:
return 1e-5
@property
def _a ( self) -> int:
return 1_2
| 676 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
UpperCAmelCase__ : Any = ["gpt2"]
UpperCAmelCase__ : List[str] = "gpt2"
if is_tf_available():
class SCREAMING_SNAKE_CASE__ ( tf.Module ):
def __init__( self , lowercase_) -> int:
super().__init__()
__snake_case = tokenizer
__snake_case = AutoConfig.from_pretrained(lowercase_)
__snake_case = TFGPTaLMHeadModel.from_config(lowercase_)
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text'),))
def _a ( self , lowercase_) -> Dict:
        tokenized = self.tokenizer(lowercase_)
        input_ids_dense = tokenized['input_ids'].to_tensor()
        input_mask = tf.cast(input_ids_dense > 0 , tf.int32)
        # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
        outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask)['logits']
return outputs
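    # `serving` above densifies the ragged token ids with zero padding and
    # rebuilds the attention mask from the nonzero positions, so tokenization
    # and the forward pass both stay inside the TF graph.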
@require_tf
@require_keras_nlp
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _a ( self) -> List[Any]:
super().setUp()
__snake_case = [GPTaTokenizer.from_pretrained(lowercase_) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__snake_case = [TFGPTaTokenizer.from_pretrained(lowercase_) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers) == len(self.tf_tokenizers)
__snake_case = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
__snake_case = list(zip(self.test_sentences , self.test_sentences[::-1]))
def _a ( self) -> List[str]:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers):
for test_inputs in self.test_sentences:
__snake_case = tokenizer([test_inputs] , return_tensors='tf')
__snake_case = tf_tokenizer([test_inputs])
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__snake_case = python_outputs[key].numpy()
__snake_case = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64) == tf_outputs_values))
@slow
def _a ( self) -> int:
for tf_tokenizer in self.tf_tokenizers:
__snake_case = tf.function(lowercase_)
for test_inputs in self.test_sentences:
__snake_case = tf.constant(lowercase_)
__snake_case = compiled_tokenizer(lowercase_)
__snake_case = tf_tokenizer(lowercase_)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def _a ( self) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
__snake_case = ModelToSave(tokenizer=lowercase_)
__snake_case = tf.convert_to_tensor([self.test_sentences[0]])
__snake_case = model.serving(lowercase_) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__snake_case = Path(lowercase_) / 'saved.model'
tf.saved_model.save(lowercase_ , lowercase_ , signatures={'serving_default': model.serving})
__snake_case = tf.saved_model.load(lowercase_)
__snake_case = loaded_model.signatures['serving_default'](lowercase_)['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
def _a ( self) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
__snake_case = tf.convert_to_tensor([self.test_sentences[0]])
__snake_case = tf_tokenizer(lowercase_) # Build model with some sample inputs
__snake_case = tf_tokenizer.get_config()
__snake_case = TFGPTaTokenizer.from_config(lowercase_)
__snake_case = model_from_config(lowercase_)
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
def _a ( self) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
            tf_tokenizer.pad_token_id = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
__snake_case = tf.convert_to_tensor([self.test_sentences[0]])
__snake_case = tf_tokenizer(lowercase_ , max_length=lowercase_)
__snake_case = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 719 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number , int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
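    # Quick sanity checks (illustrative; assumes the usual is_prime behaviour):
    print(twin_prime(3))  # 5, since (3, 5) are twin primes
    print(twin_prime(4))  # -1, since 4 is not prime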
| 676 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = CustomTokenizer
pass
| 720 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id , path , revision):
    '''simple docstring'''
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 676 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ = 3 , lowercase_ = 1 , lowercase_ = 1 , lowercase_ = "relu" , ) -> List[str]:
super().__init__()
__snake_case = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
__snake_case = nn.BatchNormad(lowercase_)
__snake_case = ACTaFN[activation] if activation is not None else nn.Identity()
def _a ( self , lowercase_) -> int:
__snake_case = self.convolution(lowercase_)
__snake_case = self.normalization(lowercase_)
__snake_case = self.activation(lowercase_)
return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_) -> Optional[int]:
super().__init__()
__snake_case = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act)
__snake_case = config.num_channels
    def _a ( self , lowercase_):
        num_channels = lowercase_.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        hidden_state = self.embedder(lowercase_)
        return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ = 2) -> Union[str, Any]:
super().__init__()
__snake_case = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_)
__snake_case = nn.BatchNormad(lowercase_)
def _a ( self , lowercase_) -> Tensor:
__snake_case = self.convolution(lowercase_)
__snake_case = self.normalization(lowercase_)
return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_) -> List[str]:
super().__init__()
__snake_case = nn.AdaptiveAvgPoolad((1, 1))
__snake_case = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1) , nn.Sigmoid() , )
def _a ( self , lowercase_) -> int:
# b c h w -> b c 1 1
__snake_case = self.pooler(lowercase_)
__snake_case = self.attention(lowercase_)
__snake_case = hidden_state * attention
return hidden_state
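# RegNetSELayer above is a Squeeze-and-Excitation block: global-average-pool
# the spatial dimensions, squeeze the channels through a small two-conv
# bottleneck with a sigmoid gate, then rescale the input channels by the
# resulting attention weights.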
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1) -> Any:
super().__init__()
__snake_case = in_channels != out_channels or stride != 1
__snake_case = max(1 , out_channels // config.groups_width)
__snake_case = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_) if should_apply_shortcut else nn.Identity()
)
__snake_case = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_) , )
__snake_case = ACTaFN[config.hidden_act]
def _a ( self , lowercase_) -> Optional[int]:
__snake_case = hidden_state
__snake_case = self.layer(lowercase_)
__snake_case = self.shortcut(lowercase_)
hidden_state += residual
__snake_case = self.activation(lowercase_)
return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1) -> Dict:
super().__init__()
__snake_case = in_channels != out_channels or stride != 1
__snake_case = max(1 , out_channels // config.groups_width)
__snake_case = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_) if should_apply_shortcut else nn.Identity()
)
__snake_case = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_) , )
__snake_case = ACTaFN[config.hidden_act]
def _a ( self , lowercase_) -> Union[str, Any]:
__snake_case = hidden_state
__snake_case = self.layer(lowercase_)
__snake_case = self.shortcut(lowercase_)
hidden_state += residual
__snake_case = self.activation(lowercase_)
return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 2 , lowercase_ = 2 , ) -> Dict:
super().__init__()
__snake_case = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
__snake_case = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_) for _ in range(depth - 1)] , )
def _a ( self , lowercase_) -> Union[str, Any]:
__snake_case = self.layers(lowercase_)
return hidden_state
class __lowercase ( nn.Module ):
def __init__( self , lowercase_) -> Union[str, Any]:
super().__init__()
__snake_case = nn.ModuleList([])
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:]):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_))
    def _a ( self , hidden_state , output_hidden_states = False , return_dict = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states)
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = RegNetConfig
__UpperCAmelCase = '''regnet'''
__UpperCAmelCase = '''pixel_values'''
__UpperCAmelCase = True
def _a ( self , lowercase_) -> Union[str, Any]:
if isinstance(lowercase_ , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu')
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def _a ( self , lowercase_ , lowercase_=False) -> Union[str, Any]:
if isinstance(lowercase_ , lowercase_):
__snake_case = value
UpperCAmelCase__ : List[str] = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ : Dict = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , lowerCamelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_) -> List[Any]:
super().__init__(lowercase_)
__snake_case = config
__snake_case = RegNetEmbeddings(lowercase_)
__snake_case = RegNetEncoder(lowercase_)
__snake_case = nn.AdaptiveAvgPoolad((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None) -> BaseModelOutputWithPoolingAndNoAttention:
__snake_case = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.embedder(lowercase_)
__snake_case = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_)
__snake_case = encoder_outputs[0]
__snake_case = self.pooler(lowercase_)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowerCamelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_) -> Any:
super().__init__(lowercase_)
__snake_case = config.num_labels
__snake_case = RegNetModel(lowercase_)
# classification head
__snake_case = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _a ( self , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , ) -> ImageClassifierOutputWithNoAttention:
__snake_case = return_dict if return_dict is not None else self.config.use_return_dict
__snake_case = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_)
__snake_case = outputs.pooler_output if return_dict else outputs[1]
__snake_case = self.classifier(lowercase_)
__snake_case = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__snake_case = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__snake_case = 'single_label_classification'
else:
__snake_case = 'multi_label_classification'
if self.config.problem_type == "regression":
__snake_case = MSELoss()
if self.num_labels == 1:
__snake_case = loss_fct(logits.squeeze() , labels.squeeze())
else:
__snake_case = loss_fct(lowercase_ , lowercase_)
elif self.config.problem_type == "single_label_classification":
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
__snake_case = BCEWithLogitsLoss()
__snake_case = loss_fct(lowercase_ , lowercase_)
if not return_dict:
__snake_case = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states)
| 721 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def A ( snake_case__ : List[Any] ) -> str:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
__snake_case = k.replace(snake_case__ , snake_case__ )
return k
def A ( snake_case__ : dict , snake_case__ : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
__snake_case = DEFAULTS.copy()
cfg_kwargs.update(snake_case__ )
__snake_case = PegasusConfig(**snake_case__ )
__snake_case = PegasusForConditionalGeneration(snake_case__ )
__snake_case = torch_model.model.state_dict()
__snake_case = {}
for k, v in tf_weights.items():
__snake_case = rename_state_dict_key(snake_case__ )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
__snake_case = v.T
__snake_case = torch.tensor(snake_case__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
__snake_case = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
__snake_case = mapping['shared.weight']
__snake_case = mapping['shared.weight']
__snake_case = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**snake_case__ )
__snake_case , __snake_case = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ )
__snake_case = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
__snake_case = tf.train.list_variables(snake_case__ )
__snake_case = {}
__snake_case = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
__snake_case = any(pat in name for pat in ignore_name )
if skip_key:
continue
__snake_case = tf.train.load_variable(snake_case__ , snake_case__ )
__snake_case = array
return tf_weights
def A ( snake_case__ : str , snake_case__ : str ) -> Tuple:
'''simple docstring'''
# save tokenizer first
__snake_case = Path(snake_case__ ).parent.name
__snake_case = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
__snake_case = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case__ )
# convert model
__snake_case = get_tf_weights_as_numpy(snake_case__ )
__snake_case = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
__snake_case = task_specific_params
__snake_case = convert_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
__snake_case = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
UpperCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ : int = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase__ : str = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : Optional[Any] = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = ["BeitFeatureExtractor"]
UpperCAmelCase__ : Union[str, Any] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[str] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[str] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
        __snake_case = self.speech_model.generate(lowercase_ , max_length=480_000)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
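        # With guidance enabled, the denoising loop below combines the two noise
        # predictions as:
        #   noise = noise_uncond + guidance_scale * (noise_text - noise_uncond)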
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
        __snake_case = 1 / 0.18215 * latents
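        # 0.18215 is the standard Stable Diffusion VAE scaling factor: dividing
        # by it maps the scheduler's latents back into the autoencoder's latent
        # space before decoding.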
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
| 676 | 0 |
'''simple docstring'''
from string import ascii_uppercase
CHAR_TO_INDEX = {char: i for i, char in enumerate(ascii_uppercase)}
INDEX_TO_CHAR = dict(enumerate(ascii_uppercase))
def generate_key(message: str , key: str) -> str:
    '''simple docstring'''
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
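# For example, generate_key("ATTACK", "KEY") cycles the key to the message
# length and returns "KEYKEY".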
def cipher_text(message: str , key_new: str) -> str:
    '''simple docstring'''
    cipher_text = ''
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (CHAR_TO_INDEX[letter] - CHAR_TO_INDEX[key_new[i]]) % 26
            i += 1
            cipher_text += INDEX_TO_CHAR[x]
    return cipher_text
def original_text(cipher_text: str , key_new: str) -> str:
    '''simple docstring'''
    or_txt = ''
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (CHAR_TO_INDEX[letter] + CHAR_TO_INDEX[key_new[i]] + 26) % 26
            i += 1
            or_txt += INDEX_TO_CHAR[x]
    return or_txt
def main() -> None:
    '''simple docstring'''
    message = 'THE GERMAN ATTACK'
    key = 'SECRET'
    key_new = generate_key(message , key)
    s = cipher_text(message , key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s , key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 701 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( lowerCamelCase__ ):
def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple:
super().__init__(*lowercase_ , **lowercase_)
__snake_case = eval_examples
__snake_case = post_process_function
def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]:
__snake_case = gen_kwargs.copy()
__snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
__snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
__snake_case = gen_kwargs
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(lowercase_)
__snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_)
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
else:
__snake_case = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_)
return metrics
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]:
__snake_case = gen_kwargs.copy()
__snake_case = self.get_test_dataloader(lowercase_)
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict')
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
| 676 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    '''simple docstring'''
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula: H = -sum p * log2 p
    # print entropy
    print(f"{round(-1 * my_fir_sum ):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum ):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    '''simple docstring'''
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
'''simple docstring'''
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 702 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
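# Grid path search in the A* style: each cell is expanded with priority
# f = g + h, where g is the accumulated step cost and h is the Manhattan
# heuristic built in the __main__ block below.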
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    '''simple docstring'''
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError('Algorithm is unable to find solution' )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 676 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset , args: Dict[str, str]):
    '''simple docstring'''
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('wer' )
    cer = load_metric('cer' )
    # compute metrics
    wer_result = wer.compute(references=result['target'] , predictions=result['prediction'] )
    cer_result = cer.compute(references=result['target'] , predictions=result['prediction'] )
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str )
    with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file , 'w' ) as p, open(target_file , 'w' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f"{i}" + '\n' )
                p.write(batch['prediction'] + '\n' )
                t.write(f"{i}" + '\n' )
                t.write(batch['target'] + '\n' )
            result.map(write_to_file , with_indices=True )
def normalize_text(text: str) -> str:
    '''simple docstring'''
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']  # multi-space runs assumed, as in the upstream eval script
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t ) )
    return text
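# Illustrative behaviour: normalize_text("Hello, WORLD!") -> "hello world".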
def main(args):
    '''simple docstring'''
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
| 703 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
| 676 | 0 |
'''simple docstring'''
from __future__ import annotations
UpperCAmelCase__ : str = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase__ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase__ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def A ( snake_case__ : Matrix , snake_case__ : int , snake_case__ : int , snake_case__ : int ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
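# Worked example: on `initial_grid` above, placing 1 at (0, 1) is safe, since
# 1 appears in neither row 0, column 1, nor the top-left 3x3 box; placing 3
# there is not, because grid[0][0] already holds a 3 in the same row.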
def A ( snake_case__ : Matrix ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
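# Worked example: for `initial_grid` the first empty cell in row-major scan
# order is (0, 1), so that is the cell the solver fills first.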
def A ( snake_case__ : Matrix ) -> Matrix | None:
'''simple docstring'''
if location := find_empty_location(snake_case__ ):
__snake_case , __snake_case = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
__snake_case = digit
if sudoku(snake_case__ ) is not None:
return grid
__snake_case = 0
return None
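# Illustrative note: digits 1-9 are tried in ascending order at the first
# empty cell; whenever no digit leads to a completed grid, the cell is reset
# to 0 and control backtracks to the previous recursion level.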
def A ( snake_case__ : Matrix ) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
print(snake_case__ , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
UpperCAmelCase__ : Union[str, Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 704 |
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
def count_of_possible_combinations(snake_case__ : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(snake_case__ )
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
snake_case__ : int , snake_case__ : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
__snake_case = sum(
count_of_possible_combinations_with_dp_array(target - item , snake_case__ )
for item in array )
__snake_case = answer
return answer
__snake_case = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ )
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int:
'''simple docstring'''
__snake_case = [0] * (target + 1)
__snake_case = 1
for i in range(1 , target + 1 ):
for j in range(snake_case__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
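# Worked trace: with array=[1, 2, 5] and target=5, the bottom-up table fills
# as dp[0..5] = 1, 1, 2, 3, 5, 9, so all three implementations above agree on
# 9 ordered ways to reach the target (order of the chosen items matters).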
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : Optional[int] = 5
UpperCAmelCase__ : Tuple = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 676 | 0 |
from ..utils import DummyObject, requires_backends
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> str:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(cls , ['torch'])
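# Illustrative note: every placeholder class below repeats this exact pattern.
# The constructor and both classmethods (presumably `from_config` and
# `from_pretrained` in the un-obfuscated source; that naming is an assumption)
# all defer to `requires_backends`, which raises an ImportError asking the
# user to install the missing `torch` backend.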
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> str:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> int:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> int:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Any:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> str:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> int:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Dict:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> str:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> str:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> str:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> str:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> int:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
def A ( *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
requires_backends(snake_case__ , ['torch'] )
def A ( *snake_case__ : int , **snake_case__ : Dict ) -> List[str]:
'''simple docstring'''
requires_backends(snake_case__ , ['torch'] )
def A ( *snake_case__ : Optional[int] , **snake_case__ : Tuple ) -> Dict:
'''simple docstring'''
requires_backends(snake_case__ , ['torch'] )
def A ( *snake_case__ : Tuple , **snake_case__ : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(snake_case__ , ['torch'] )
def A ( *snake_case__ : str , **snake_case__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
requires_backends(snake_case__ , ['torch'] )
def A ( *snake_case__ : int , **snake_case__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(snake_case__ , ['torch'] )
def A ( *snake_case__ : List[str] , **snake_case__ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(snake_case__ , ['torch'] )
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Any:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> int:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> int:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Dict:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Any:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> int:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> str:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> int:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Any:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> int:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> str:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> str:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> int:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Dict:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Any:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Any:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Any:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Dict:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Any:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> int:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Any:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> int:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Any:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> str:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Any:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Dict:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Dict:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Any:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> str:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Dict:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> str:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Dict:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Any:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Tuple:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Dict:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> int:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> str:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Dict:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(cls , ['torch'])
class __lowercase ( metaclass=lowerCamelCase__ ):
__UpperCAmelCase = ['''torch''']
def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
requires_backends(self , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> List[str]:
requires_backends(cls , ['torch'])
@classmethod
def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]:
requires_backends(cls , ['torch'])
| 705 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> List[str]:
__snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value = [(True, None)] * 3_0
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def A ( snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__snake_case = 'index.faiss'
__snake_case = f"mock://{index_name}"
index.save(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=lowercase_)
mocked_bulk.return_value = [(True, None)] * 3_0
index.add_documents(['foo', 'bar', 'foobar'])
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
| 676 | 0 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
class __lowercase ( lowerCamelCase__ ):
def __init__( self , *lowercase_ , **lowercase_) -> None:
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_)
| 706 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = args.log_outputs
__snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
__snake_case = load_metric('wer' )
__snake_case = load_metric('cer' )
# compute metrics
__snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] )
__snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] )
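# For reference: WER = (substitutions + deletions + insertions) divided by the
# number of words in the reference transcript; CER is the same edit-distance
# ratio computed over characters instead of words.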
# print & log results
__snake_case = f"WER: {wer_result}\nCER: {cer_result}"
print(snake_case__ )
with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
f.write(snake_case__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__snake_case = f"log_{dataset_id}_predictions.txt"
__snake_case = f"log_{dataset_id}_targets.txt"
with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t:
# mapping function to write output
def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ):
p.write(f"{i}" + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(f"{i}" + '\n' )
t.write(batch['target'] + '\n' )
result.map(snake_case__ , with_indices=snake_case__ )
def A ( snake_case__ : str ) -> str:
'''simple docstring'''
__snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__snake_case = re.sub(snake_case__ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing newline characters, etc.
# note that order is important here!
__snake_case = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
__snake_case = ' '.join(text.split(snake_case__ ) )
return text
def A ( snake_case__ : int ) -> Optional[int]:
'''simple docstring'''
# load dataset
__snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
__snake_case = AutoFeatureExtractor.from_pretrained(args.model_id )
__snake_case = feature_extractor.sampling_rate
# resample audio
__snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) )
# load eval pipeline
if args.device is None:
__snake_case = 0 if torch.cuda.is_available() else -1
__snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case__ : Optional[Any] ):
__snake_case = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
__snake_case = prediction['text']
__snake_case = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
__snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case__ , snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase : List[Any] = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pix2struct import (
    PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    Pix2StructConfig,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pix2struct import (
    PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
    Pix2StructForConditionalGeneration,
    Pix2StructPreTrainedModel,
    Pix2StructTextModel,
    Pix2StructVisionModel,
)
else:
import sys
lowerCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run, first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hang in `barrier` calls, you have network issues; you can try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def A ( *snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(__file__ , 'r' ) as fh:
fcntl.flock(snake_case__ , fcntl.LOCK_EX )
try:
print(*snake_case__ )
finally:
fcntl.flock(snake_case__ , fcntl.LOCK_UN )
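# Illustrative note: the exclusive flock above serializes the print across
# ranks, so output from many concurrent processes does not interleave on a
# shared stdout.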
UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
UpperCAmelCase__ : Any = torch.device("cuda", local_rank)
UpperCAmelCase__ : Union[str, Any] = socket.gethostname()
UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
UpperCAmelCase__ : Optional[int] = dist.get_rank()
UpperCAmelCase__ : List[str] = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 676 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> str:
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _a ( self) -> Optional[int]:
__snake_case = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
return Dataset.from_dict(lowercase_)
def _a ( self) -> Tuple:
__snake_case = self._create_example_records()
__snake_case = Dataset.from_list(lowercase_)
self.assertListEqual(dset.column_names , ['col_1', 'col_2'])
for i, r in enumerate(lowercase_):
self.assertDictEqual(lowercase_ , example_records[i])
def _a ( self) -> Optional[Any]:
__snake_case = self._create_example_records()
__snake_case = Dataset.from_list(lowercase_)
__snake_case = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info , dset_from_dict.info)
def _a ( self) -> int: # checks what happens with missing columns
__snake_case = [{'col_1': 1}, {'col_2': 'x'}]
__snake_case = Dataset.from_list(lowercase_)
self.assertDictEqual(dset[0] , {'col_1': 1})
self.assertDictEqual(dset[1] , {'col_1': None}) # NB: first record is used for columns
def _a ( self) -> List[Any]: # checks if the type can be inferred from the second record
__snake_case = [{'col_1': []}, {'col_1': [1, 2]}]
__snake_case = Dataset.from_list(lowercase_)
self.assertEqual(dset.info.features['col_1'] , Sequence(Value('int64')))
def _a ( self) -> Optional[Any]:
__snake_case = Dataset.from_list([])
self.assertEqual(len(lowercase_) , 0)
self.assertListEqual(dset.column_names , [])
| 708 |
from datetime import datetime
import requests
def A ( snake_case__ : str ) -> bytes:
'''simple docstring'''
__snake_case = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
__snake_case = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(snake_case__ ).content
if __name__ == "__main__":
UpperCAmelCase__ : Dict = input("Enter Video/IGTV url: ").strip()
UpperCAmelCase__ : Optional[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 0 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def A ( snake_case__ : str , snake_case__ : int=1 ) -> Union[str, Any]:
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def A ( snake_case__ : List[Any] , snake_case__ : List[str]=0 ) -> List[Any]:
'''simple docstring'''
__snake_case = []
for old_item in old_list:
__snake_case = old_item.replace('in_layers.0' , 'norm1' )
__snake_case = new_item.replace('in_layers.2' , 'conv1' )
__snake_case = new_item.replace('out_layers.0' , 'norm2' )
__snake_case = new_item.replace('out_layers.3' , 'conv2' )
__snake_case = new_item.replace('emb_layers.1' , 'time_emb_proj' )
__snake_case = new_item.replace('skip_connection' , 'conv_shortcut' )
__snake_case = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
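# Worked example: with the default prefix shave of 0, the key
# 'input_blocks.1.0.in_layers.0.weight' is rewritten by the rules above to
# 'input_blocks.1.0.norm1.weight'.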
def A ( snake_case__ : Any , snake_case__ : Dict=0 ) -> Dict:
'''simple docstring'''
__snake_case = []
for old_item in old_list:
__snake_case = old_item
__snake_case = new_item.replace('norm.weight' , 'group_norm.weight' )
__snake_case = new_item.replace('norm.bias' , 'group_norm.bias' )
__snake_case = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
__snake_case = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
__snake_case = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
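# Worked example: 'middle_block.1.norm.weight' becomes
# 'middle_block.1.group_norm.weight', and 'middle_block.1.proj_out.weight'
# becomes 'middle_block.1.proj_attn.weight' under these replacements.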
def A ( snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None ) -> Optional[int]:
'''simple docstring'''
assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
__snake_case = old_checkpoint[path]
__snake_case = old_tensor.shape[0] // 3
__snake_case = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
__snake_case = old_tensor.shape[0] // config['num_head_channels'] // 3
__snake_case = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
__snake_case , __snake_case , __snake_case = old_tensor.split(channels // num_heads , dim=1 )
__snake_case = query.reshape(snake_case__ )
__snake_case = key.reshape(snake_case__ )
__snake_case = value.reshape(snake_case__ )
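# Shape sketch (illustrative numbers, not taken from a real config): a fused
# qkv conv weight of shape (1536, 512, 1) with num_head_channels=64 gives
# channels=512 and num_heads=8; it is reshaped to (8, 192, 512, 1), split
# along dim 1 into three (8, 64, 512, 1) chunks, and each chunk is reshaped
# to the (-1, 512) target shape as the separate query/key/value weights.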
for path in paths:
__snake_case = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
__snake_case = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
__snake_case = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
__snake_case = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
__snake_case = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
__snake_case = old_checkpoint[path['old']][:, :, 0]
else:
__snake_case = old_checkpoint[path['old']]
def A ( snake_case__ : List[str] , snake_case__ : Tuple ) -> str:
'''simple docstring'''
__snake_case = {}
__snake_case = checkpoint['time_embed.0.weight']
__snake_case = checkpoint['time_embed.0.bias']
__snake_case = checkpoint['time_embed.2.weight']
__snake_case = checkpoint['time_embed.2.bias']
__snake_case = checkpoint['input_blocks.0.0.weight']
__snake_case = checkpoint['input_blocks.0.0.bias']
__snake_case = checkpoint['out.0.weight']
__snake_case = checkpoint['out.0.bias']
__snake_case = checkpoint['out.2.weight']
__snake_case = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
__snake_case = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
__snake_case = {
layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
__snake_case = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
__snake_case = {
layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
__snake_case = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
__snake_case = {
layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
__snake_case = (i - 1) // (config['num_res_blocks'] + 1)
__snake_case = (i - 1) % (config['num_res_blocks'] + 1)
__snake_case = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
__snake_case = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
if f"input_blocks.{i}.0.op.weight" in checkpoint:
__snake_case = checkpoint[
f"input_blocks.{i}.0.op.weight"
]
__snake_case = checkpoint[
f"input_blocks.{i}.0.op.bias"
]
continue
__snake_case = renew_resnet_paths(snake_case__ )
__snake_case = {'old': f"input_blocks.{i}.0", 'new': f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
__snake_case = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
__snake_case = renew_attention_paths(snake_case__ )
__snake_case = {
'old': f"input_blocks.{i}.1",
'new': f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
__snake_case = {
f"input_blocks.{i}.1.qkv.bias": {
'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
f"input_blocks.{i}.1.qkv.weight": {
'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
__snake_case = middle_blocks[0]
__snake_case = middle_blocks[1]
__snake_case = middle_blocks[2]
__snake_case = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
__snake_case = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
__snake_case = renew_attention_paths(snake_case__ )
__snake_case = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
__snake_case = i // (config['num_res_blocks'] + 1)
__snake_case = i % (config['num_res_blocks'] + 1)
__snake_case = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
__snake_case = {}
for layer in output_block_layers:
__snake_case , __snake_case = layer.split('.' )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
__snake_case = [layer_name]
if len(snake_case__ ) > 1:
__snake_case = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
__snake_case = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
__snake_case = renew_resnet_paths(snake_case__ )
__snake_case = renew_resnet_paths(snake_case__ )
__snake_case = {'old': f"output_blocks.{i}.0", 'new': f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
__snake_case = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
__snake_case = checkpoint[
f"output_blocks.{i}.{index}.conv.weight"
]
__snake_case = checkpoint[
f"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
__snake_case = []
if len(snake_case__ ):
__snake_case = renew_attention_paths(snake_case__ )
__snake_case = {
'old': f"output_blocks.{i}.1",
'new': f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
__snake_case = {
f"output_blocks.{i}.1.qkv.bias": {
'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
f"output_blocks.{i}.1.qkv.weight": {
'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=snake_case__ , )
else:
__snake_case = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
__snake_case = '.'.join(['output_blocks', str(snake_case__ ), path['old']] )
__snake_case = '.'.join(['up_blocks', str(snake_case__ ), 'resnets', str(snake_case__ ), path['new']] )
__snake_case = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
UpperCAmelCase__ : int = parser.parse_args()
UpperCAmelCase__ : Optional[int] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
UpperCAmelCase__ : Optional[Any] = json.loads(f.read())
UpperCAmelCase__ : str = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
UpperCAmelCase__ : Tuple = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
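    # Try to assemble a full LDM pipeline (UNet + scheduler + VQ-VAE) from the checkpoint folder; fall back to saving the bare UNet.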
try:
UpperCAmelCase__ : Tuple = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCAmelCase__ : Optional[Any] = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCAmelCase__ : List[str] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 709 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowercase :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def _a ( self) -> Union[str, Any]:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = OpenLlamaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
        # create hypothetical next tokens and extend next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append the new tokens and mask to the existing inputs
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
def _a ( self) -> Optional[Any]:
__snake_case = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> Tuple:
__snake_case = OpenLlamaModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self) -> int:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'single_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'multi_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _a ( self) -> List[Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def _a ( self , lowercase_) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = ids_tensor([1, 1_0] , config.vocab_size)
__snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = OpenLlamaModel(lowercase_)
original_model.to(lowercase_)
original_model.eval()
__snake_case = original_model(lowercase_).last_hidden_state
__snake_case = original_model(lowercase_).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = {'type': scaling_type, 'factor': 10.0}
__snake_case = OpenLlamaModel(lowercase_)
scaled_model.to(lowercase_)
scaled_model.eval()
__snake_case = scaled_model(lowercase_).last_hidden_state
__snake_case = scaled_model(lowercase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
| 676 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Any = "Hello world! cécé herlolip"
def A ( snake_case__ : str , snake_case__ : str , snake_case__ : bool ) -> Optional[Any]:
'''simple docstring'''
__snake_case = FairseqRobertaModel.from_pretrained(snake_case__ )
roberta.eval() # disable dropout
__snake_case = roberta.model.encoder.sentence_encoder
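    # Build an equivalent Hugging Face config from the fairseq sentence-encoder hyper-parameters.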
__snake_case = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
__snake_case = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , snake_case__ )
__snake_case = XLMRobertaXLForSequenceClassification(snake_case__ ) if classification_head else XLMRobertaXLForMaskedLM(snake_case__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__snake_case = roberta_sent_encoder.embed_tokens.weight
__snake_case = roberta_sent_encoder.embed_positions.weight
__snake_case = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__snake_case = roberta_sent_encoder.layer_norm.weight
__snake_case = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__snake_case = model.roberta.encoder.layer[i]
__snake_case = roberta_sent_encoder.layers[i]
__snake_case = layer.attention
__snake_case = roberta_layer.self_attn_layer_norm.weight
__snake_case = roberta_layer.self_attn_layer_norm.bias
# self attention
__snake_case = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__snake_case = roberta_layer.self_attn.q_proj.weight
__snake_case = roberta_layer.self_attn.q_proj.bias
__snake_case = roberta_layer.self_attn.k_proj.weight
__snake_case = roberta_layer.self_attn.k_proj.bias
__snake_case = roberta_layer.self_attn.v_proj.weight
__snake_case = roberta_layer.self_attn.v_proj.bias
# self-attention output
__snake_case = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__snake_case = roberta_layer.self_attn.out_proj.weight
__snake_case = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__snake_case = roberta_layer.final_layer_norm.weight
__snake_case = roberta_layer.final_layer_norm.bias
# intermediate
__snake_case = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__snake_case = roberta_layer.fca.weight
__snake_case = roberta_layer.fca.bias
# output
__snake_case = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__snake_case = roberta_layer.fca.weight
__snake_case = roberta_layer.fca.bias
# end of layer
if classification_head:
__snake_case = roberta.model.classification_heads['mnli'].dense.weight
__snake_case = roberta.model.classification_heads['mnli'].dense.bias
__snake_case = roberta.model.classification_heads['mnli'].out_proj.weight
__snake_case = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__snake_case = roberta.model.encoder.lm_head.dense.weight
__snake_case = roberta.model.encoder.lm_head.dense.bias
__snake_case = roberta.model.encoder.lm_head.layer_norm.weight
__snake_case = roberta.model.encoder.lm_head.layer_norm.bias
__snake_case = roberta.model.encoder.lm_head.weight
__snake_case = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__snake_case = roberta.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
__snake_case = model(snake_case__ )[0]
if classification_head:
__snake_case = roberta.model.classification_heads['mnli'](roberta.extract_features(snake_case__ ) )
else:
__snake_case = roberta.model(snake_case__ )[0]
print(our_output.shape , their_output.shape )
__snake_case = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__snake_case = torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 710 |
def A ( snake_case__ : int ) -> bool:
'''simple docstring'''
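    # An automorphic number equals the trailing digits of its own square (e.g. 5 -> 25, 76 -> 5776); compare digits from the right.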
if not isinstance(snake_case__ , snake_case__ ):
__snake_case = f"Input value of [number={number}] must be an integer"
raise TypeError(snake_case__ )
if number < 0:
return False
__snake_case = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
class __lowercase ( lowerCamelCase__ ):
    def __init__( self , *args , **kwargs) -> None:
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs)
| 711 |
import numpy as np
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def A ( snake_case__ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
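    # SiLU (swish) activation: x * sigmoid(x).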
return vector * sigmoid(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ : Dict = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
def A ( snake_case__ : int ) -> bool:
'''simple docstring'''
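    # Lucas-Lehmer test: for an odd prime p, 2^p - 1 is prime iff s_(p-2) == 0 (mod 2^p - 1),
    # where s_0 = 4 and s_(k+1) = s_k^2 - 2.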
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
__snake_case = 4
__snake_case = (1 << p) - 1
for _ in range(p - 2 ):
__snake_case = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 676 | 0 |
def A ( snake_case__ : int ) -> list:
'''simple docstring'''
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('The given input must be positive' )
# get the generated string sequence
__snake_case = gray_code_sequence_string(snake_case__ )
    # convert them to integers
for i in range(len(snake_case__ ) ):
__snake_case = int(sequence[i] , 2 )
return sequence
def A ( snake_case__ : int ) -> list:
'''simple docstring'''
    # The approach is recursive
    # Base cases are bit_count = 0 and bit_count = 1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
__snake_case = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
    # recursively generate the sequence for bit_count - 1 bits
__snake_case = gray_code_sequence_string(bit_count - 1 )
__snake_case = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
__snake_case = '0' + smaller_sequence[i]
sequence.append(snake_case__ )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
__snake_case = '1' + smaller_sequence[i]
sequence.append(snake_case__ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : Optional[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
import argparse
import os
import re
import packaging.version
UpperCAmelCase__ : int = "examples/"
UpperCAmelCase__ : int = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
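# Each entry maps a file kind to (regex, replacement template); "VERSION" in the template is substituted with the target version.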
UpperCAmelCase__ : List[str] = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
UpperCAmelCase__ : Tuple = "README.md"
def A ( snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int ) -> Optional[Any]:
'''simple docstring'''
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
__snake_case = f.read()
__snake_case , __snake_case = REPLACE_PATTERNS[pattern]
__snake_case = replace.replace('VERSION' , snake_case__ )
__snake_case = re_pattern.sub(snake_case__ , snake_case__ )
with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(snake_case__ )
def A ( snake_case__ : int ) -> Any:
'''simple docstring'''
for folder, directories, fnames in os.walk(snake_case__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(snake_case__ , snake_case__ ) , snake_case__ , pattern='examples' )
def A ( snake_case__ : Dict , snake_case__ : Optional[Any]=False ) -> List[str]:
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(snake_case__ , snake_case__ , snake_case__ )
if not patch:
update_version_in_examples(snake_case__ )
def A ( ) -> List[Any]:
'''simple docstring'''
__snake_case = '🤗 Transformers currently provides the following architectures'
__snake_case = '1. Want to contribute a new model?'
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
__snake_case = f.readlines()
# Find the start of the list.
__snake_case = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__snake_case = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
__snake_case = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(snake_case__ )
def A ( ) -> Optional[Any]:
'''simple docstring'''
with open(REPLACE_FILES['init'] , 'r' ) as f:
__snake_case = f.read()
__snake_case = REPLACE_PATTERNS['init'][0].search(snake_case__ ).groups()[0]
return packaging.version.parse(snake_case__ )
def A ( snake_case__ : str=False ) -> Tuple:
'''simple docstring'''
__snake_case = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
__snake_case = default_version.base_version
elif patch:
__snake_case = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
__snake_case = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
__snake_case = input(f"Which version are you releasing? [{default_version}]" )
if len(snake_case__ ) == 0:
__snake_case = default_version
print(f"Updating version to {version}." )
global_version_update(snake_case__ , patch=snake_case__ )
def A ( ) -> Any:
'''simple docstring'''
__snake_case = get_version()
__snake_case = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
__snake_case = current_version.base_version
# Check with the user we got that right.
__snake_case = input(f"Which version are we developing now? [{dev_version}]" )
if len(snake_case__ ) == 0:
__snake_case = dev_version
print(f"Updating version to {version}." )
global_version_update(snake_case__ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
UpperCAmelCase__ : Optional[int] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ : Union[str, Any] = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
UpperCAmelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def A ( snake_case__ : List[Any] ) -> Any:
'''simple docstring'''
__snake_case = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__snake_case = 4
__snake_case = 48
__snake_case = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = [6, 6, 6, 6]
__snake_case = 60
__snake_case = [6, 6, 6, 6]
__snake_case = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = 4
__snake_case = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__snake_case = 1
__snake_case = 1
__snake_case = 126
__snake_case = 7
__snake_case = 255.0
__snake_case = ''
return config
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
__snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__snake_case = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__snake_case = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "conv_first" in name:
__snake_case = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__snake_case = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__snake_case = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__snake_case = name.replace('upsample.2' , 'upsample.convolution_1' )
__snake_case = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__snake_case = 'swin2sr.' + name
return name
def A ( snake_case__ : str , snake_case__ : List[Any] ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[4] )
__snake_case = config.embed_dim
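            # Split the fused qkv projection into separate query/key/value slices of size dim each.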
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
pass
else:
__snake_case = val
return orig_state_dict
def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int ) -> Tuple:
'''simple docstring'''
__snake_case = get_config(snake_case__ )
__snake_case = SwinaSRForImageSuperResolution(snake_case__ )
model.eval()
__snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )
__snake_case = convert_state_dict(snake_case__ , snake_case__ )
__snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
__snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
__snake_case = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__snake_case = 126 if 'Jpeg' in checkpoint_url else 256
__snake_case = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case = transforms(snake_case__ ).unsqueeze(0 )
if config.num_channels == 1:
__snake_case = pixel_values[:, 0, :, :].unsqueeze(1 )
__snake_case = model(snake_case__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 )
print('Looks ok!' )
__snake_case = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__snake_case = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 676 | 0 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowerCAmelCase__ = IFImgaImgSuperResolutionPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _a ( self) -> str:
return self._get_superresolution_dummy_components()
def _a ( self , lowercase_ , lowercase_=0) -> List[str]:
if str(lowercase_).startswith('mps'):
__snake_case = torch.manual_seed(lowercase_)
else:
__snake_case = torch.Generator(device=lowercase_).manual_seed(lowercase_)
__snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_)).to(lowercase_)
__snake_case = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(lowercase_)).to(lowercase_)
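        # A dummy 32x32 input image plus the 16x16 original low-resolution image expected by the super-resolution stage.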
__snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
def _a ( self) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _a ( self) -> List[str]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1)
def _a ( self) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def _a ( self) -> List[Any]:
self._test_save_load_local()
def _a ( self) -> int:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {"vocab_file": "spiece.model"}
UpperCAmelCase__ : Any = {
"vocab_file": {
"bert_for_seq_generation": (
"https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
),
}
}
UpperCAmelCase__ : Optional[Any] = {"bert_for_seq_generation": 5_12}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = []
__UpperCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , lowercase_ , lowercase_="<s>" , lowercase_="</s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<::::>" , lowercase_ = None , **lowercase_ , ) -> None:
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , sep_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
__snake_case = vocab_file
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowercase_)
@property
def _a ( self) -> Dict:
return self.sp_model.get_piece_size()
def _a ( self) -> int:
__snake_case = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> List[Any]:
__snake_case = self.__dict__.copy()
__snake_case = None
return state
def __setstate__( self , lowercase_) -> Tuple:
__snake_case = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
__snake_case = {}
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _a ( self , lowercase_) -> List[str]:
return self.sp_model.encode(lowercase_ , out_type=lowercase_)
def _a ( self , lowercase_) -> Any:
return self.sp_model.piece_to_id(lowercase_)
def _a ( self , lowercase_) -> Optional[Any]:
__snake_case = self.sp_model.IdToPiece(lowercase_)
return token
def _a ( self , lowercase_) -> str:
__snake_case = []
__snake_case = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase_) + token
__snake_case = []
else:
current_sub_tokens.append(lowercase_)
out_string += self.sp_model.decode(lowercase_)
return out_string.strip()
def _a ( self , lowercase_ , lowercase_ = None) -> Tuple[str]:
if not os.path.isdir(lowercase_):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__snake_case = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , 'wb') as fi:
__snake_case = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
| 717 |
from __future__ import annotations
class __lowercase :
def __init__( self , lowercase_) -> None:
__snake_case = data
__snake_case = None
__snake_case = None
def A ( snake_case__ : Node | None ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def A ( snake_case__ : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def A ( snake_case__ : Node ) -> bool:
'''simple docstring'''
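    # A full binary tree: every node has either zero or two children.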
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def A ( ) -> None: # Main function for testing.
'''simple docstring'''
__snake_case = Node(1 )
__snake_case = Node(2 )
__snake_case = Node(3 )
__snake_case = Node(4 )
__snake_case = Node(5 )
__snake_case = Node(6 )
__snake_case = Node(7 )
__snake_case = Node(8 )
__snake_case = Node(9 )
print(is_full_binary_tree(snake_case__ ) )
print(depth_of_tree(snake_case__ ) )
print('Tree is: ' )
display(snake_case__ )
if __name__ == "__main__":
main()
| 676 | 0 |
import os
def A ( ) -> List[str]:
'''simple docstring'''
__snake_case = os.path.dirname(os.path.realpath(snake_case__ ) )
__snake_case = os.path.join(snake_case__ , 'triangle.txt' )
with open(snake_case__ ) as f:
__snake_case = f.readlines()
__snake_case = []
for line in triangle:
__snake_case = []
for number in line.strip().split(' ' ):
numbers_from_line.append(int(snake_case__ ) )
a.append(snake_case__ )
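    # Dynamic programming: each cell accumulates the larger of the two path sums from the row above, so the best total ends in the last row.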
for i in range(1 , len(snake_case__ ) ):
for j in range(len(a[i] ) ):
__snake_case = a[i - 1][j] if j != len(a[i - 1] ) else 0
__snake_case = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(snake_case__ , snake_case__ )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''table-transformer'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=1_0_0 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=2_5_6 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__snake_case = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(lowercase_ , lowercase_):
__snake_case = backbone_config.get('model_type')
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(lowercase_)
# set timm attributes to None
__snake_case , __snake_case , __snake_case = None, None, None
__snake_case = use_timm_backbone
__snake_case = backbone_config
__snake_case = num_channels
__snake_case = num_queries
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = init_xavier_std
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = encoder_layers
__snake_case = auxiliary_loss
__snake_case = position_embedding_type
__snake_case = backbone
__snake_case = use_pretrained_backbone
__snake_case = dilation
# Hungarian matcher
__snake_case = class_cost
__snake_case = bbox_cost
__snake_case = giou_cost
# Loss coefficients
__snake_case = mask_loss_coefficient
__snake_case = dice_loss_coefficient
__snake_case = bbox_loss_coefficient
__snake_case = giou_loss_coefficient
__snake_case = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def _a ( self) -> int:
return self.encoder_attention_heads
@property
def _a ( self) -> int:
return self.d_model
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def _a ( self) -> float:
return 1e-5
@property
def _a ( self) -> int:
return 1_2
| 676 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCAmelCase__ : List[Any] = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
UpperCAmelCase__ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def A ( ) -> List[str]:
'''simple docstring'''
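    # Query PyPI for every published diffusers release and sort them by semantic version.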
__snake_case = 'https://pypi.org/pypi/diffusers/json'
__snake_case = json.loads(request.urlopen(snake_case__ ).read() )['releases'].keys()
return sorted(snake_case__ , key=lambda snake_case__ : version.Version(snake_case__ ) )
def A ( ) -> Optional[int]:
'''simple docstring'''
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(snake_case__ )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
__snake_case = Path(snake_case__ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]) -> None:
    '''Create a dynamic module under the cache directory for modules.'''
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file: Union[str, os.PathLike]) -> list:
    '''Get the list of modules that are relatively imported in a module file.'''
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file: Union[str, os.PathLike]) -> list:
    '''Get the list of all files needed by a module through relative imports, recursively.'''
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename: Union[str, os.PathLike]) -> list:
    '''Check that the current environment contains all libraries imported in a file, and return its relative imports.'''
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )
    return get_relative_imports(filename)
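# Example: for a file containing "import torch\nfrom .utils import helper",
# check_imports verifies that `torch` is importable in the current environment
# and returns ['utils'], so utils.py can be fetched next to the module file.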
def get_class_in_module(class_name: Optional[str], module_path: Union[str, os.PathLike]):
    '''Import a module from the dynamic modules cache and extract a class from it.'''
    module_path = str(module_path).replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    '''Retrieve the (single) pipeline class in the module that inherits from `DiffusionPipeline`.'''
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False) -> str:
    '''
    Download a module file from a local folder, a community pipeline on GitHub, or a repo on the Hugging Face Hub,
    cache it under the dynamic modules cache, and return its path inside that cache.
    '''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: Optional[str] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs):
    '''Extract a class from a module file, present in a local folder or repository of a model.'''
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
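# Usage sketch (network access assumed; "clip_guided_stable_diffusion" is one
# of the community pipelines in the diffusers GitHub examples folder):
#   cls = get_class_from_dynamic_module(
#       "clip_guided_stable_diffusion", "clip_guided_stable_diffusion.py"
#   )
# This downloads the file, caches it under HF_MODULES_CACHE, imports it, and
# returns the single class inheriting from DiffusionPipeline.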
| 719 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    '''
    Return number + 2 if (number, number + 2) is a twin prime pair, else -1.
    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    '''
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
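    # Usage sketch (requires maths.prime_check): list the twin prime pairs that
    # start below 50; expected output begins (3, 5), (5, 7), (11, 13), ...
    print([(p, twin_prime(p)) for p in range(2, 50) if twin_prime(p) != -1])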
| 676 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    '''Base class for text model outputs that also contains a projection of the last hidden states.'''
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig
    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions,
            )
| 720 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
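# Concrete instance of the asserted URL shape (values from the parametrize
# lists above; note that `quote` percent-encodes the blank in the filename):
#   hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2")
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"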
| 676 | 0 |
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
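    # Quick check (a sketch): the result must agree with the built-in sort;
    # n passes of odd-even transposition suffice for a list of length n.
    import random
    sample = random.sample(range(100), 10)
    assert odd_even_transposition(list(sample)) == sorted(sample)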
| 721 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    '''Apply each (pegasus_name, hf_name) replacement in PATTERNS, in order.'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
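# Worked example (hypothetical TF key), applying the PATTERNS table in order:
#   rename_state_dict_key("encoder/memory_attention/output_proj/kernel")
#   -> "encoder.encoder_attn.out_proj.weight"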
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
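# Example invocation (checkpoint path hypothetical):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc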
| 676 | 0 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == F"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"""
| 677 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list, u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
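if __name__ == "__main__":
    # Minimal usage sketch: a 4-node graph whose MST keeps edges (0,1,1),
    # (1,2,2), (2,3,3) and drops (0,3,10), for a total weight of 6.
    g = Graph(4)
    g.add_edge(0, 1, 1)
    g.add_edge(1, 2, 2)
    g.add_edge(2, 3, 3)
    g.add_edge(0, 3, 10)
    g.boruvka()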
| 677 | 1 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    '''
    A wrapper around a learning rate scheduler that only steps when the wrapped optimizer(s) actually took a training
    step, e.g. to avoid stepping the scheduler too fast when gradient overflow in mixed precision skipped the update.
    '''
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)
    # Passthrough methods mirroring the standard torch LR scheduler API:
    def get_last_lr(self):
        return self.scheduler.get_last_lr()
    def state_dict(self):
        return self.scheduler.state_dict()
    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)
    def get_lr(self):
        return self.scheduler.get_lr()
    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
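# Usage sketch (assuming the surrounding `accelerate` setup): wrap a torch
# scheduler with its prepared optimizer so `.step()` is skipped while gradients
# are still accumulating, or when AMP skipped the optimizer step entirely:
#   scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=True)
#   scheduler.step()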
| 677 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
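# Local sanity check (the tokenizer id is an assumption): ratio_char_token is
# characters per token, a rough measure of how well the tokenizer fits the data.
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   tokenize({"content": "def f():\n    return 1"})  # -> input_ids + ratio_char_token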
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 677 | 1 |
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)
def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")
if __name__ == "__main__":
main()
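# Note: move_tower performs exactly 2**height - 1 moves, so e.g. height=3
# prints seven "moving disk" lines.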
| 677 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams", type=int, default=None, help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model, (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"], output_names=["output_ids"], dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            }, example_outputs=summary_ids, )
        logger.info("Model exported to {}".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            }, )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1E-3, atol=1E-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
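# Example invocation (script name and paths hypothetical):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base --device cpu --output_file_path BART.onnx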
| 677 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Any = "pix2struct_text_model"
_UpperCamelCase : List[str] = ["past_key_values"]
_UpperCamelCase : Optional[int] = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _lowerCAmelCase=5_0_2_4_4 , _lowerCAmelCase=7_6_8 , _lowerCAmelCase=6_4 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=1_2 , _lowerCAmelCase=1_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=1_2_8 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-6 , _lowerCAmelCase=1.0 , _lowerCAmelCase="gelu_new" , _lowerCAmelCase=0 , _lowerCAmelCase=False , _lowerCAmelCase=0 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=True , **_lowerCAmelCase , ):
_lowercase : Dict = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : int = d_kv
_lowercase : Dict = d_ff
_lowercase : Tuple = num_layers
_lowercase : str = num_heads
_lowercase : Tuple = relative_attention_num_buckets
_lowercase : str = relative_attention_max_distance
_lowercase : Optional[Any] = dropout_rate
_lowercase : Optional[int] = layer_norm_epsilon
_lowercase : Tuple = initializer_factor
_lowercase : Tuple = use_cache
_lowercase : Optional[Any] = eos_token_id
_lowercase : List[Any] = decoder_start_token_id
# for backwards compatibility
_lowercase : Optional[Any] = dense_act_fn
super().__init__(
pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , is_decoder=_lowerCAmelCase , **_lowerCAmelCase , )
@classmethod
def __a ( cls , _lowerCAmelCase , **_lowerCAmelCase ):
cls._set_token_in_kwargs(_lowerCAmelCase )
_lowercase , _lowercase : Dict = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
_lowercase : Optional[Any] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : str = "pix2struct_vision_model"
def __init__( self , _lowerCAmelCase=7_6_8 , _lowerCAmelCase=7_6_8 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=6_4 , _lowerCAmelCase=1_2 , _lowerCAmelCase=1_2 , _lowerCAmelCase="gelu_new" , _lowerCAmelCase=1E-6 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=1E-10 , _lowerCAmelCase=1.0 , _lowerCAmelCase=4_0_9_6 , _lowerCAmelCase=3_2 , _lowerCAmelCase=1_2_8 , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
_lowercase : int = hidden_size
_lowercase : Dict = patch_embed_hidden_size
_lowercase : str = d_ff
_lowercase : Any = dropout_rate
_lowercase : str = num_hidden_layers
_lowercase : Optional[int] = num_attention_heads
_lowercase : Tuple = initializer_range
_lowercase : Tuple = initializer_factor
_lowercase : str = attention_dropout
_lowercase : Dict = layer_norm_eps
_lowercase : List[Any] = dense_act_fn
_lowercase : int = seq_len
_lowercase : Optional[int] = relative_attention_num_buckets
_lowercase : Any = relative_attention_max_distance
_lowercase : Union[str, Any] = d_kv
@classmethod
def __a ( cls , _lowerCAmelCase , **_lowerCAmelCase ):
cls._set_token_in_kwargs(_lowerCAmelCase )
_lowercase , _lowercase : Tuple = cls.get_config_dict(_lowerCAmelCase , **_lowerCAmelCase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
_lowercase : str = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_lowerCAmelCase , **_lowerCAmelCase )
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Optional[int] = "pix2struct"
_UpperCamelCase : Tuple = True
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=1.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=True , **_lowerCAmelCase , ):
super().__init__(tie_word_embeddings=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase )
if text_config is None:
_lowercase : List[str] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
_lowercase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
_lowercase : Union[str, Any] = PixaStructTextConfig(**_lowerCAmelCase )
_lowercase : Optional[int] = PixaStructVisionConfig(**_lowerCAmelCase )
_lowercase : Optional[Any] = self.text_config.decoder_start_token_id
_lowercase : List[str] = self.text_config.pad_token_id
_lowercase : Tuple = self.text_config.eos_token_id
_lowercase : Union[str, Any] = initializer_factor
_lowercase : Union[str, Any] = initializer_range
_lowercase : Any = self.initializer_range
_lowercase : str = self.initializer_range
_lowercase : List[str] = is_vqa
@classmethod
def __a ( cls , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = copy.deepcopy(self.__dict__ )
_lowercase : List[Any] = self.text_config.to_dict()
_lowercase : Optional[Any] = self.vision_config.to_dict()
_lowercase : Union[str, Any] = self.__class__.model_type
return output
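# Composition sketch (upstream class names assumed — Pix2StructConfig wraps
# Pix2StructTextConfig and Pix2StructVisionConfig):
#   cfg = Pix2StructConfig(text_config={"num_layers": 2}, vision_config={"num_hidden_layers": 2})
# Each sub-config dict is expanded into the corresponding *Config above, and
# pad/eos/decoder_start token ids are lifted from the text config.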
| 677 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCamelCase : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = False
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ):
_lowercase : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
_lowercase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowercase : Optional[Any] = parent
_lowercase : str = batch_size
_lowercase : Optional[int] = seq_length
_lowercase : Tuple = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : Optional[Any] = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : str = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Tuple = hidden_act
_lowercase : Dict = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Tuple = max_position_embeddings
_lowercase : List[str] = type_vocab_size
_lowercase : Optional[Any] = type_sequence_label_size
_lowercase : List[Any] = initializer_range
_lowercase : List[str] = num_labels
_lowercase : Union[str, Any] = num_choices
_lowercase : List[str] = scope
_lowercase : Union[str, Any] = embedding_size
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Optional[int] = None
if self.use_input_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Dict = None
_lowercase : Any = None
_lowercase : int = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase )
_lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
_lowercase : Tuple = [input_ids, input_mask]
_lowercase : str = model(_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Optional[int] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase )
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[int] = self.num_labels
_lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase )
_lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = self.num_choices
_lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : str = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = self.num_labels
_lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
_lowercase : List[str] = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : int = config_and_inputs
_lowercase : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def __a ( self ):
_lowercase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self )
_lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase )
def __a ( self ):
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase )
@slow
def __a ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_lowercase : List[str] = TFMobileBertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Dict = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
_lowercase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowercase : List[str] = model(_lowerCAmelCase )[0]
_lowercase : str = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , _lowerCAmelCase )
_lowercase : List[Any] = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 )
| 677 | 1 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
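# Shape sketch: for a protein of L residues this populates
#   residx_atom14_to_atom37 (L, 14) int64 and atom14_atom_exists (L, 14) float32,
#   residx_atom37_to_atom14 (L, 37) int64 and atom37_atom_exists (L, 37) float32.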
| 677 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
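# With both qubits flipped by X before measurement, every shot collapses to
# |11>, so the printed histogram is expected to be {'11': 1000}.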
| 677 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , ):
super().__init__()
self.register_modules(transformer=_lowerCAmelCase , vae=_lowerCAmelCase , scheduler=_lowerCAmelCase )
# create a imagenet -> id dictionary for easier use
_lowercase : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(',' ):
_lowercase : int = int(_lowerCAmelCase )
_lowercase : Optional[int] = dict(sorted(self.labels.items() ) )
def __a ( self , _lowerCAmelCase ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = list(_lowerCAmelCase )
for l in label:
if l not in self.labels:
raise ValueError(
F"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , _lowerCAmelCase , _lowerCAmelCase = 4.0 , _lowerCAmelCase = None , _lowerCAmelCase = 5_0 , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , ):
_lowercase : List[Any] = len(_lowerCAmelCase )
_lowercase : str = self.transformer.config.sample_size
_lowercase : Any = self.transformer.config.in_channels
_lowercase : List[str] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowerCAmelCase , device=self.device , dtype=self.transformer.dtype , )
_lowercase : Any = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
_lowercase : Dict = torch.tensor(_lowerCAmelCase , device=self.device ).reshape(-1 )
_lowercase : int = torch.tensor([1_0_0_0] * batch_size , device=self.device )
_lowercase : List[str] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
_lowercase : Tuple = latent_model_input[: len(_lowerCAmelCase ) // 2]
_lowercase : Dict = torch.cat([half, half] , dim=0 )
_lowercase : Union[str, Any] = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : List[Any] = t
if not torch.is_tensor(_lowerCAmelCase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
_lowercase : str = latent_model_input.device.type == 'mps'
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = torch.floataa if is_mps else torch.floataa
else:
_lowercase : Optional[Any] = torch.intaa if is_mps else torch.intaa
_lowercase : List[Any] = torch.tensor([timesteps] , dtype=_lowerCAmelCase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
_lowercase : Dict = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowercase : List[Any] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
_lowercase : Tuple = self.transformer(
_lowerCAmelCase , timestep=_lowerCAmelCase , class_labels=_lowerCAmelCase ).sample
# perform guidance
if guidance_scale > 1:
_lowercase , _lowercase : List[str] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
_lowercase , _lowercase : Optional[int] = torch.split(_lowerCAmelCase , len(_lowerCAmelCase ) // 2 , dim=0 )
_lowercase : int = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
_lowercase : str = torch.cat([half_eps, half_eps] , dim=0 )
_lowercase : Optional[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
_lowercase , _lowercase : Tuple = torch.split(_lowerCAmelCase , _lowerCAmelCase , dim=1 )
else:
_lowercase : Tuple = noise_pred
# compute previous image: x_t -> x_t-1
_lowercase : List[str] = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
if guidance_scale > 1:
_lowercase , _lowercase : int = latent_model_input.chunk(2 , dim=0 )
else:
_lowercase : Optional[Any] = latent_model_input
_lowercase : str = 1 / self.vae.config.scaling_factor * latents
_lowercase : int = self.vae.decode(_lowerCAmelCase ).sample
_lowercase : int = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowercase : Union[str, Any] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowercase : Union[str, Any] = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_lowerCAmelCase )
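# A minimal, self-contained sketch of the classifier-free guidance arithmetic used in the
# loop above (`eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)`). The tensor
# shapes and the guidance scale below are illustrative assumptions, not values taken from
# the pipeline.
import torch

def apply_cfg(cond_eps: torch.Tensor, uncond_eps: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # move the prediction away from the unconditional estimate, toward the conditional one
    return uncond_eps + guidance_scale * (cond_eps - uncond_eps)

cond = torch.randn(1, 4, 8, 8)    # hypothetical conditional noise prediction
uncond = torch.randn(1, 4, 8, 8)  # hypothetical unconditional noise prediction
guided = apply_cfg(cond, uncond, guidance_scale=4.0)
assert guided.shape == cond.shape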
| 677 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests often fail with an OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but it will be slower, as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Dict:
if attention_mask is None:
_lowercase : str = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
_lowercase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
_lowercase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ):
_lowercase : List[str] = parent
_lowercase : List[Any] = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Tuple = use_labels
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = intermediate_size
_lowercase : Any = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = eos_token_id
_lowercase : int = pad_token_id
_lowercase : Tuple = bos_token_id
_lowercase : List[Any] = initializer_range
def __a ( self ):
_lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
_lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
_lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = 2_0
_lowercase : List[Any] = model_class_name(_lowerCAmelCase )
_lowercase : List[Any] = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : int = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : int = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
_lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Dict = 2_0
_lowercase : Any = model_class_name(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] )
_lowercase , _lowercase : Optional[int] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowercase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
_lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
_UpperCamelCase : Tuple = 99
def __a ( self ):
_lowercase : Dict = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.int64 , )
_lowercase : Union[str, Any] = input_ids.shape[0]
_lowercase : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __a ( self ):
_lowercase , _lowercase , _lowercase : int = self._get_config_and_data()
_lowercase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase )
_lowercase : str = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.int64 )
_lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.int64 )
_lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
_lowercase : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.int64 )
_lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.float32 ).sum()
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase , __snake_case ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __a ( self ):
_lowercase : List[str] = FlaxBlenderbotSmallModelTester(self )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __a ( self ):
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : int = model_class(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_lowercase : List[Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __a ( self ):
for model_class_name in self.all_model_classes:
_lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id
_lowercase : int = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
| 677 | 1 |
# using DFS to find an Eulerian path traversal
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
_lowercase : Union[str, Any] = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
_lowercase , _lowercase : Optional[int] = True, True
_lowercase : List[Any] = dfs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return path
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
_lowercase : Any = 0
_lowercase : Optional[int] = -1
for i in range(SCREAMING_SNAKE_CASE ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
_lowercase : Any = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
_lowercase : Tuple = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
_lowercase , _lowercase : List[Any] = check_circuit_or_path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if check == 3:
print('graph is not Eulerian' )
print('no path' )
return
_lowercase : Any = 1
if check == 2:
_lowercase : Optional[int] = odd_node
print('graph has an Euler path' )
if check == 1:
print('graph has an Euler cycle' )
_lowercase : Union[str, Any] = dfs(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> Tuple:
_lowercase : Optional[int] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
_lowercase : Dict = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
_lowercase : int = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
_lowercase : Union[str, Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
_lowercase : List[Any] = {
1: [],
2: []
# all degrees are zero
}
_lowercase : Dict = 10
check_euler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
check_euler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
check_euler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
check_euler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
check_euler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
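# A small sanity check of the degree-parity rule that check_circuit_or_path relies on:
# a connected graph has an Euler cycle iff every vertex has even degree, and an Euler
# path iff exactly two vertices have odd degree. The graphs below reuse the
# adjacency-list format of main() and are illustrative only.
cycle_graph = {1: [2, 3], 2: [1, 3], 3: [1, 2]}            # every degree is 2 -> cycle
path_graph = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1]}  # vertices 1 and 4 are odd -> path
for g, expected_odd in ((cycle_graph, 0), (path_graph, 2)):
    odd = sum(1 for v in g if len(g[v]) % 2 == 1)
    assert odd == expected_odd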
| 677 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Dict = "longformer"
def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Optional[int] = attention_window
_lowercase : str = sep_token_id
_lowercase : Optional[Any] = bos_token_id
_lowercase : List[Any] = eos_token_id
_lowercase : Optional[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Optional[int] = num_attention_heads
_lowercase : List[str] = hidden_act
_lowercase : List[str] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = layer_norm_eps
_lowercase : List[str] = onnx_export
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = True
@property
def __a ( self ):
if self.task == "multiple-choice":
_lowercase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowercase : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def __a ( self ):
_lowercase : Optional[int] = super().outputs
if self.task == "default":
_lowercase : List[str] = {0: 'batch'}
return outputs
@property
def __a ( self ):
return 1E-4
@property
def __a ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 1_4 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ):
_lowercase : int = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_lowercase : str = torch.zeros_like(inputs['input_ids'] )
# make every second token global
_lowercase : Any = 1
return inputs
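# A brief sketch of the "make every second token global" step right above: the export
# helper zero-initialises the global attention mask and then marks every second
# position as global. The batch shape below is an arbitrary illustration, not the
# exporter's default.
import torch

input_ids = torch.ones(2, 8, dtype=torch.long)   # hypothetical dummy batch
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1                # every second token becomes global
assert int(global_attention_mask.sum()) == 2 * 4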
| 677 | 1 |
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Tuple = None
_lowercase : Union[str, Any] = None
_lowercase : Any = graph
self._normalize_graph(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : List[Any] = len(_lowerCAmelCase )
_lowercase : Tuple = None
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
if isinstance(sources , int ):
_lowercase : Union[str, Any] = [sources]
if isinstance(sinks , int ):
_lowercase : Dict = [sinks]
if len(_lowerCAmelCase ) == 0 or len(_lowerCAmelCase ) == 0:
return
_lowercase : Tuple = sources[0]
_lowercase : Any = sinks[0]
# make a fake vertex if there is more
# than one source or sink
if len(_lowerCAmelCase ) > 1 or len(_lowerCAmelCase ) > 1:
_lowercase : Any = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_lowercase : Optional[int] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_lowercase : str = max_input_flow
_lowercase : Dict = 0
_lowercase : List[str] = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_lowercase : str = max_input_flow
_lowercase : List[Any] = size - 1
def __a ( self ):
if self.maximum_flow_algorithm is None:
raise Exception('You need to set a maximum flow algorithm first.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __a ( self , _lowerCAmelCase ):
_lowercase : Optional[int] = algorithm(self )
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase ):
_lowercase : Dict = flow_network
_lowercase : List[str] = flow_network.verticesCount
_lowercase : Tuple = flow_network.sourceIndex
_lowercase : Any = flow_network.sinkIndex
# it's just a reference, so you shouldn't change it
# in your algorithms; use a deep copy before doing that
_lowercase : Any = flow_network.graph
_lowercase : Optional[Any] = False
def __a ( self ):
if not self.executed:
self._algorithm()
_lowercase : str = True
def __a ( self ):
pass
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase )
# use this to save your result
_lowercase : List[str] = -1
def __a ( self ):
if not self.executed:
raise Exception('You should execute the algorithm before using its result!' )
return self.maximum_flow
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase )
_lowercase : Dict = [[0] * self.verticies_count for i in range(self.verticies_count )]
_lowercase : List[Any] = [0] * self.verticies_count
_lowercase : Optional[Any] = [0] * self.verticies_count
def __a ( self ):
_lowercase : Optional[int] = self.verticies_count
# push some substance to the graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_lowercase : Dict = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_lowercase : Tuple = 0
while i < len(_lowerCAmelCase ):
_lowercase : List[str] = vertices_list[i]
_lowercase : Union[str, Any] = self.heights[vertex_index]
self.process_vertex(_lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from index 0
vertices_list.insert(0 , vertices_list.pop(_lowerCAmelCase ) )
_lowercase : Any = 0
else:
i += 1
_lowercase : Any = sum(self.preflow[self.source_index] )
def __a ( self , _lowerCAmelCase ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's a neighbour and the current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_lowerCAmelCase , _lowerCAmelCase )
self.relabel(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __a ( self , _lowerCAmelCase ):
_lowercase : Optional[Any] = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_lowercase : str = self.heights[to_index]
if min_height is not None:
_lowercase : List[str] = min_height + 1
if __name__ == "__main__":
UpperCamelCase = [0]
UpperCamelCase = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCamelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCamelCase = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCamelCase = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''')
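# Hand-check for the example network above: with entrance 0 and exit 3, the only
# augmenting path is 0 -> 1 -> 2 -> 3, so the expected maximum flow is
# min(7, 6, 8) = 6; the 3 -> 0 edge leaves the sink and cannot carry useful flow.
expected_maximum_flow = min(7, 6, 8)
assert expected_maximum_flow == 6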
| 677 |
from __future__ import annotations
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
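# Usage sketch for the uniqueness helper above (renamed here for illustration): it
# reports whether a sequence contains no duplicates and requires hashable elements.
def all_unique(collection) -> bool:
    return len(set(collection)) == len(collection)

assert all_unique([1, 2, 3]) is True
assert all_unique([1, 2, 2]) is False
assert all_unique("abcd") is True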
| 677 | 1 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
return EnvironmentCommand()
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
return EnvironmentCommand(args.accelerate_config_file )
class lowerCAmelCase_ ( __snake_case ):
@staticmethod
def __a ( _lowerCAmelCase ):
_lowercase : int = parser.add_parser('env' )
download_parser.set_defaults(func=_lowerCAmelCase )
download_parser.add_argument(
'--accelerate-config_file' , default=_lowerCAmelCase , help='The accelerate config file to use for the default values in the launching script.' , )
download_parser.set_defaults(func=_lowerCAmelCase )
def __init__( self , _lowerCAmelCase , *_lowerCAmelCase ):
_lowercase : int = accelerate_config_file
def __a ( self ):
_lowercase : Optional[Any] = 'not installed'
if is_safetensors_available():
import safetensors
_lowercase : Optional[int] = safetensors.__version__
elif importlib.util.find_spec('safetensors' ) is not None:
import safetensors
_lowercase : Optional[Any] = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
_lowercase : Optional[Any] = 'not installed'
_lowercase : Any = 'not found'
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_lowercase : List[Any] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_lowerCAmelCase ):
_lowercase : str = load_config_from_file(self._accelerate_config_file ).to_dict()
_lowercase : Any = (
'\n'.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
else F"""\t{accelerate_config}"""
)
_lowercase : List[str] = 'not installed'
_lowercase : Dict = 'NA'
if is_torch_available():
import torch
_lowercase : Any = torch.__version__
_lowercase : int = torch.cuda.is_available()
_lowercase : List[Any] = 'not installed'
_lowercase : int = 'NA'
if is_tf_available():
import tensorflow as tf
_lowercase : Any = tf.__version__
try:
# deprecated in v2.1
_lowercase : Optional[Any] = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_lowercase : Dict = bool(tf.config.list_physical_devices('GPU' ) )
_lowercase : List[str] = 'not installed'
_lowercase : Optional[Any] = 'not installed'
_lowercase : List[Any] = 'not installed'
_lowercase : Any = 'NA'
if is_flax_available():
import flax
import jax
import jaxlib
_lowercase : int = flax.__version__
_lowercase : Union[str, Any] = jax.__version__
_lowercase : str = jaxlib.__version__
_lowercase : int = jax.lib.xla_bridge.get_backend().platform
_lowercase : Union[str, Any] = {
'`transformers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Huggingface_hub version': huggingface_hub.__version__,
'Safetensors version': F"""{safetensors_version}""",
'Accelerate version': F"""{accelerate_version}""",
'Accelerate config': F"""{accelerate_config_str}""",
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Tensorflow version (GPU?)': F"""{tf_version} ({tf_cuda_available})""",
'Flax version (CPU?/GPU?/TPU?)': F"""{flax_version} ({jax_backend})""",
'Jax version': F"""{jax_version}""",
'JaxLib version': F"""{jaxlib_version}""",
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the last two points.\n' )
print(self.format_dict(_lowerCAmelCase ) )
return info
@staticmethod
def __a ( _lowerCAmelCase ):
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 677 |
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 ) -> list:
_lowercase : List[str] = end or len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Dict = i
_lowercase : str = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
_lowercase : Optional[Any] = array[temp_index - 1]
temp_index -= 1
_lowercase : Optional[Any] = temp_index_value
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: # Max Heap
_lowercase : List[str] = index
_lowercase : List[str] = 2 * index + 1 # Left Node
_lowercase : Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
_lowercase : Any = left_index
if right_index < heap_size and array[largest] < array[right_index]:
_lowercase : str = right_index
if largest != index:
_lowercase , _lowercase : List[str] = array[largest], array[index]
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
_lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
_lowercase , _lowercase : List[Any] = array[0], array[i]
heapify(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE )
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
_lowercase : Optional[Any] = low
_lowercase : Tuple = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
_lowercase , _lowercase : Tuple = array[j], array[i]
i += 1
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
if len(SCREAMING_SNAKE_CASE ) == 0:
return array
_lowercase : List[str] = 2 * math.ceil(math.loga(len(SCREAMING_SNAKE_CASE ) ) )
_lowercase : str = 16
return intro_sort(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(SCREAMING_SNAKE_CASE )
max_depth -= 1
_lowercase : int = median_of_a(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
_lowercase : str = partition(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
intro_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = p
return insertion_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input("Enter numbers separated by a comma : ").strip()
UpperCamelCase = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
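# A self-contained sketch of the median-of-three pivot rule used by intro sort above:
# taking the median of the first, middle, and last elements avoids the degenerate
# pivots that already-sorted input would hand a naive quicksort.
def median_of_three(a, lo, mid, hi):
    return sorted((a[lo], a[mid], a[hi]))[1]

data = [9, 1, 5, 3, 7, 2, 8]
assert median_of_three(data, 0, len(data) // 2, len(data) - 1) == 8  # median of 9, 3, 8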
| 677 | 1 |
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPFeatureExtractor"]
UpperCamelCase = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
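# A minimal sketch of the lazy-import idea behind _LazyModule: attribute access on the
# package triggers the real submodule import, so importing the package stays cheap
# until a symbol is actually used. This is a simplified illustration, not the actual
# implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the module that actually defines it
        self._name_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._name_to_module[attr])
        return getattr(module, attr)

# Example: json.dumps is only imported when first accessed.
lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'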
| 677 | 1 |
UpperCamelCase = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
UpperCamelCase = [{"type": "code", "content": INSTALL_CONTENT}]
UpperCamelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 677 |
from collections.abc import Sequence
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
return sum(c * (x**i) for i, c in enumerate(SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
_lowercase : Optional[Any] = 0.0
for coeff in reversed(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = result * x + coeff
return result
if __name__ == "__main__":
UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
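# Worked check for the sample above: with coefficients (0.0, 0.0, 5.0, 9.3, 7.0) the
# polynomial is p(x) = 5x^2 + 9.3x^3 + 7x^4, so at x = 10.0 both evaluators should
# return 5*100 + 9.3*1000 + 7*10000 = 500 + 9300 + 70000 = 79800.0. Horner's scheme
# reaches the same value with one multiply-add per coefficient,
# ((((7)*x + 9.3)*x + 5)*x + 0)*x + 0, instead of the naive repeated exponentiation.
assert abs((5 * 10.0**2 + 9.3 * 10.0**3 + 7 * 10.0**4) - 79800.0) < 1e-6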
| 677 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="pt" ) -> List[Any]:
_lowercase : Optional[int] = {'add_prefix_space': True} if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and not line.startswith(' ' ) else {}
_lowercase : Optional[Any] = padding_side
return tokenizer(
[line] , max_length=SCREAMING_SNAKE_CASE , padding='max_length' if pad_to_max_length else None , truncation=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
_lowercase : Tuple = input_ids.ne(SCREAMING_SNAKE_CASE ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
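# Illustration of the column-trimming helper above: columns that are padding in every
# row are dropped. Pad token id 0 is an arbitrary choice for this demo.
import torch

demo_ids = torch.tensor([[5, 7, 0, 0], [6, 0, 0, 0]])
keep = demo_ids.ne(0).any(dim=0)  # a column survives if any row is non-pad there
assert torch.equal(demo_ids[:, keep], torch.tensor([[5, 7], [6, 0]]))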
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="train" , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="" , ):
super().__init__()
_lowercase : int = Path(_lowerCAmelCase ).joinpath(type_path + '.source' )
_lowercase : Tuple = Path(_lowerCAmelCase ).joinpath(type_path + '.target' )
_lowercase : int = self.get_char_lens(self.src_file )
_lowercase : int = max_source_length
_lowercase : int = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
_lowercase : Optional[int] = tokenizer
_lowercase : Union[str, Any] = prefix
if n_obs is not None:
_lowercase : Optional[Any] = self.src_lens[:n_obs]
_lowercase : List[Any] = src_lang
_lowercase : Dict = tgt_lang
def __len__( self ):
return len(self.src_lens )
def __getitem__( self , _lowerCAmelCase ):
_lowercase : Optional[int] = index + 1 # linecache starts at 1
_lowercase : int = self.prefix + linecache.getline(str(self.src_file ) , _lowerCAmelCase ).rstrip('\n' )
_lowercase : Optional[int] = linecache.getline(str(self.tgt_file ) , _lowerCAmelCase ).rstrip('\n' )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _lowerCAmelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_lowercase : str = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _lowerCAmelCase ) else self.tokenizer
)
_lowercase : Dict = self.tokenizer.generator if isinstance(self.tokenizer , _lowerCAmelCase ) else self.tokenizer
_lowercase : Any = encode_line(_lowerCAmelCase , _lowerCAmelCase , self.max_source_length , 'right' )
_lowercase : Union[str, Any] = encode_line(_lowerCAmelCase , _lowerCAmelCase , self.max_target_length , 'right' )
_lowercase : List[str] = source_inputs['input_ids'].squeeze()
_lowercase : Tuple = target_inputs['input_ids'].squeeze()
_lowercase : Tuple = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __a ( _lowerCAmelCase ):
return [len(_lowerCAmelCase ) for x in Path(_lowerCAmelCase ).open().readlines()]
def __a ( self , _lowerCAmelCase ):
_lowercase : Dict = torch.stack([x['input_ids'] for x in batch] )
_lowercase : Any = torch.stack([x['attention_mask'] for x in batch] )
_lowercase : List[Any] = torch.stack([x['decoder_input_ids'] for x in batch] )
_lowercase : Dict = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _lowerCAmelCase )
else self.tokenizer.pad_token_id
)
_lowercase : List[Any] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _lowerCAmelCase )
else self.tokenizer.pad_token_id
)
_lowercase : Tuple = trim_batch(_lowerCAmelCase , _lowerCAmelCase )
_lowercase , _lowercase : Union[str, Any] = trim_batch(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase )
_lowercase : int = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
UpperCamelCase = getLogger(__name__)
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple:
return list(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None:
_lowercase : List[str] = get_git_info()
save_json(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , 'git_log.json' ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=4 , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , indent=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int:
with open(SCREAMING_SNAKE_CASE ) as f:
return json.load(SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> Dict:
_lowercase : Tuple = git.Repo(search_parent_directories=SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = {
'repo_id': str(SCREAMING_SNAKE_CASE ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List:
return list(map(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
with open(SCREAMING_SNAKE_CASE , 'wb' ) as f:
return pickle.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
def remove_articles(SCREAMING_SNAKE_CASE ):
return re.sub(R'\b(a|an|the)\b' , ' ' , SCREAMING_SNAKE_CASE )
def white_space_fix(SCREAMING_SNAKE_CASE ):
return " ".join(text.split() )
def remove_punc(SCREAMING_SNAKE_CASE ):
_lowercase : List[str] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(SCREAMING_SNAKE_CASE ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE ) ) ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : int = normalize_answer(SCREAMING_SNAKE_CASE ).split()
_lowercase : str = normalize_answer(SCREAMING_SNAKE_CASE ).split()
_lowercase : Optional[int] = Counter(SCREAMING_SNAKE_CASE ) & Counter(SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = sum(common.values() )
if num_same == 0:
return 0
_lowercase : Union[str, Any] = 1.0 * num_same / len(SCREAMING_SNAKE_CASE )
_lowercase : Dict = 1.0 * num_same / len(SCREAMING_SNAKE_CASE )
_lowercase : str = (2 * precision * recall) / (precision + recall)
return fa
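# Worked example of the token-overlap F1 computed above (normalization omitted for
# brevity; Counter is imported at the top of this file): prediction "a b c" vs gold
# "a b d" share 2 tokens, so precision = recall = 2/3 and F1 = 2*(2/3)*(2/3)/(4/3) = 2/3.
pred_demo, gold_demo = "a b c".split(), "a b d".split()
num_same_demo = sum((Counter(pred_demo) & Counter(gold_demo)).values())
p_demo, r_demo = num_same_demo / len(pred_demo), num_same_demo / len(gold_demo)
assert abs(2 * p_demo * r_demo / (p_demo + r_demo) - 2 / 3) < 1e-9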
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
return normalize_answer(SCREAMING_SNAKE_CASE ) == normalize_answer(SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
_lowercase : List[str] = 0
for hypo, pred in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
em += exact_match_score(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
em /= len(SCREAMING_SNAKE_CASE )
return {"em": em}
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str:
return model_prefix.startswith('rag' )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : Any = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_lowercase : int = 'dropout_rate'
for p in extra_params:
if getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if not hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and not hasattr(SCREAMING_SNAKE_CASE , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(SCREAMING_SNAKE_CASE ) )
delattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
_lowercase : Optional[Any] = p if hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else equivalent_param[p]
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
delattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return hparams, config
| 677 |
from __future__ import annotations
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase=None ):
_lowercase : int = data
_lowercase : Union[str, Any] = None
def __repr__( self ):
_lowercase : Dict = []
_lowercase : Tuple = self
while temp:
string_rep.append(F"""{temp.data}""" )
_lowercase : Optional[Any] = temp.next
return "->".join(_lowerCAmelCase )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
if not elements_list:
raise Exception('The Elements List is empty' )
_lowercase : Union[str, Any] = Node(elements_list[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
_lowercase : Optional[int] = Node(elements_list[i] )
_lowercase : List[Any] = current.next
return head
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None:
if head_node is not None and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
print_reverse(head_node.next )
print(head_node.data )
def __magic_name__ ( ) -> List[str]:
from doctest import testmod
testmod()
_lowercase : int = make_linked_list([14, 52, 14, 12, 43] )
print('Linked List:' )
print(SCREAMING_SNAKE_CASE )
print('Elements in Reverse:' )
print_reverse(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
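# Expected behaviour sketch for main() above: print_reverse descends to the tail
# before printing, so the sample list [14, 52, 14, 12, 43] comes out as
# 43, 12, 14, 52, 14 (one value per line). The same reversal on a plain list:
values = [14, 52, 14, 12, 43]
assert list(reversed(values)) == [43, 12, 14, 52, 14]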
| 677 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Any = ["input_features", "is_longer"]
def __init__( self , _lowerCAmelCase=6_4 , _lowerCAmelCase=4_8_0_0_0 , _lowerCAmelCase=4_8_0 , _lowerCAmelCase=1_0 , _lowerCAmelCase=1_0_2_4 , _lowerCAmelCase=0.0 , _lowerCAmelCase=False , _lowerCAmelCase = 0 , _lowerCAmelCase = 1_4_0_0_0 , _lowerCAmelCase = None , _lowerCAmelCase = "fusion" , _lowerCAmelCase = "repeatpad" , **_lowerCAmelCase , ):
super().__init__(
feature_size=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , padding_value=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , )
_lowercase : Optional[Any] = top_db
_lowercase : List[Any] = truncation
_lowercase : Tuple = padding
_lowercase : Optional[int] = fft_window_size
_lowercase : Tuple = (fft_window_size >> 1) + 1
_lowercase : Optional[Any] = hop_length
_lowercase : List[Any] = max_length_s
_lowercase : Optional[Any] = max_length_s * sampling_rate
_lowercase : Optional[int] = sampling_rate
_lowercase : Union[str, Any] = frequency_min
_lowercase : List[Any] = frequency_max
_lowercase : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowerCAmelCase , min_frequency=_lowerCAmelCase , max_frequency=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , norm=_lowerCAmelCase , mel_scale='htk' , )
_lowercase : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowerCAmelCase , min_frequency=_lowerCAmelCase , max_frequency=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , norm='slaney' , mel_scale='slaney' , )
def __a ( self ):
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowercase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
_lowercase : Dict = spectrogram(
_lowerCAmelCase , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_lowerCAmelCase , log_mel='dB' , )
return log_mel_spectrogram.T
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : Dict = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowercase : int = [0]
# randomly choose index for each part
_lowercase : Optional[Any] = np.random.choice(ranges[0] )
_lowercase : Dict = np.random.choice(ranges[1] )
_lowercase : Tuple = np.random.choice(ranges[2] )
_lowercase : Optional[Any] = mel[idx_front : idx_front + chunk_frames, :]
_lowercase : List[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
_lowercase : List[Any] = mel[idx_back : idx_back + chunk_frames, :]
_lowercase : Any = torch.tensor(mel[None, None, :] )
_lowercase : Any = torch.nn.functional.interpolate(
_lowerCAmelCase , size=[chunk_frames, 6_4] , mode='bilinear' , align_corners=_lowerCAmelCase )
_lowercase : Union[str, Any] = mel_shrink[0][0].numpy()
_lowercase : Any = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowercase : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowercase : Optional[Any] = len(_lowerCAmelCase ) - max_length
_lowercase : List[Any] = np.random.randint(0 , overflow + 1 )
_lowercase : List[Any] = waveform[idx : idx + max_length]
_lowercase : List[Any] = self._np_extract_fbank_features(_lowerCAmelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowercase : Optional[Any] = self._np_extract_fbank_features(_lowerCAmelCase , self.mel_filters )
_lowercase : Tuple = max_length // self.hop_length + 1 # the +1 is related to how the spectrogram is computed
_lowercase : Tuple = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowercase : Any = np.stack([mel, mel, mel, mel] , axis=0 )
_lowercase : Optional[int] = False
else:
_lowercase : Optional[Any] = self._random_mel_fusion(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : List[Any] = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
_lowercase : Union[str, Any] = False
# only use "repeat" as a new possible value for padding; you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowercase : Optional[int] = int(max_length / len(_lowerCAmelCase ) )
_lowercase : List[Any] = np.stack(np.tile(_lowerCAmelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowercase : Dict = int(max_length / len(_lowerCAmelCase ) )
_lowercase : Optional[Any] = np.stack(np.tile(_lowerCAmelCase , _lowerCAmelCase ) )
_lowercase : Tuple = np.pad(_lowerCAmelCase , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
_lowercase : Union[str, Any] = self._np_extract_fbank_features(_lowerCAmelCase , self.mel_filters )
_lowercase : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowercase : Tuple = self._np_extract_fbank_features(_lowerCAmelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ):
_lowercase : Optional[Any] = truncation if truncation is not None else self.truncation
_lowercase : int = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : int = isinstance(_lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_lowercase : Optional[Any] = is_batched_numpy or (
isinstance(_lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : Union[str, Any] = [np.asarray(_lowerCAmelCase , dtype=np.float64 ) for speech in raw_speech]
elif not is_batched and not isinstance(_lowerCAmelCase , np.ndarray ):
_lowercase : Union[str, Any] = np.asarray(_lowerCAmelCase , dtype=np.float64 )
elif isinstance(_lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
_lowercase : Any = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
_lowercase : Optional[int] = [np.asarray(_lowerCAmelCase )]
# convert to mel spectrogram, truncate and pad if needed.
_lowercase : List[Any] = [
self._get_input_mel(_lowerCAmelCase , max_length if max_length else self.nb_max_samples , _lowerCAmelCase , _lowerCAmelCase )
for waveform in raw_speech
]
_lowercase : Any = []
_lowercase : Tuple = []
for mel, longer in padded_inputs:
input_mel.append(_lowerCAmelCase )
is_longer.append(_lowerCAmelCase )
if truncation == "fusion" and sum(_lowerCAmelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowercase : int = np.random.randint(0 , len(_lowerCAmelCase ) )
_lowercase : Union[str, Any] = True
if isinstance(input_mel[0] , _lowerCAmelCase ):
_lowercase : Optional[int] = [np.asarray(_lowerCAmelCase , dtype=np.float64 ) for feature in input_mel]
# is_longer is a list of bool
_lowercase : str = [[longer] for longer in is_longer]
_lowercase : List[str] = {'input_features': input_mel, 'is_longer': is_longer}
_lowercase : Tuple = BatchFeature(_lowerCAmelCase )
if return_tensors is not None:
_lowercase : str = input_features.convert_to_tensors(_lowerCAmelCase )
return input_features
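# Editor's sketch (not part of the extractor above): a minimal numpy-only
# rendition of the two padding modes handled in `_get_input_mel`. "repeat"
# tiles the short waveform and truncates to max_length; "repeatpad" tiles a
# whole number of times and zero-pads the remainder. The helper name is ours.
import numpy as np
def pad_waveform(waveform, max_length, mode="repeatpad"):
    if waveform.shape[0] >= max_length:
        return waveform[:max_length]
    n_repeat = int(max_length / len(waveform))
    if mode == "repeat":
        return np.tile(waveform, n_repeat + 1)[:max_length]
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)
print(pad_waveform(np.arange(3.0), 8, "repeat"))     # [0. 1. 2. 0. 1. 2. 0. 1.]
print(pad_waveform(np.arange(3.0), 8, "repeatpad"))  # [0. 1. 2. 0. 1. 2. 0. 0.]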
| 677 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
benchmark()
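# Editor's sanity check (not in the benchmark above): both implementations
# should agree on the sample vectors, giving sqrt(27) ~= 5.196.
import numpy as np
a, b = [1, 2, 3], [4, 5, 6]
d_np = float(np.sqrt(np.sum((np.asarray(a) - np.asarray(b)) ** 2)))
d_py = sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5
assert abs(d_np - d_py) < 1e-12 and abs(d_np - 27 ** 0.5) < 1e-12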
| 677 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=3 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3 , _lowerCAmelCase=1_0 , _lowerCAmelCase=[1_0, 2_0, 3_0, 4_0] , _lowerCAmelCase=[1, 1, 2, 1] , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="relu" , _lowerCAmelCase=3 , _lowerCAmelCase=None , ):
_lowercase : List[Any] = parent
_lowercase : int = batch_size
_lowercase : Union[str, Any] = image_size
_lowercase : str = num_channels
_lowercase : Dict = embeddings_size
_lowercase : Any = hidden_sizes
_lowercase : Union[str, Any] = depths
_lowercase : List[str] = is_training
_lowercase : int = use_labels
_lowercase : Optional[int] = hidden_act
_lowercase : Optional[Any] = num_labels
_lowercase : List[Any] = scope
_lowercase : List[str] = len(_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase : Optional[int] = None
if self.use_labels:
_lowercase : str = ids_tensor([self.batch_size] , self.num_labels )
_lowercase : str = self.get_config()
return config, pixel_values, labels
def __a ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFResNetModel(config=_lowerCAmelCase )
_lowercase : Tuple = model(_lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = self.num_labels
_lowercase : List[str] = TFResNetForImageClassification(_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self ):
_lowercase : Tuple = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase : int = config_and_inputs
_lowercase : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : List[str] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCamelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Any = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : Any = False
def __a ( self ):
_lowercase : Any = TFResNetModelTester(self )
_lowercase : Optional[int] = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def __a ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self ):
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __a ( self ):
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __a ( self ):
pass
def __a ( self ):
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[str] = model_class(_lowerCAmelCase )
_lowercase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Optional[Any] = [*signature.parameters.keys()]
_lowercase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : List[str] = model_class(_lowerCAmelCase )
_lowercase : str = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
_lowercase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowercase : str = self.model_tester.num_stages
self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : str = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowercase : Dict = layer_type
_lowercase : Tuple = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : Optional[Any] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = TFResNetModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def prepare_img() -> Any:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def __a ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __a ( self ):
_lowercase : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowercase : Any = self.default_image_processor
_lowercase : Union[str, Any] = prepare_img()
_lowercase : List[str] = image_processor(images=_lowerCAmelCase , return_tensors='tf' )
# forward pass
_lowercase : str = model(**_lowerCAmelCase )
# verify the logits
_lowercase : List[str] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
_lowercase : List[str] = tf.constant([-11.10_69, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCAmelCase , atol=1E-4 ) )
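# Editor's note: the shape assertions above follow standard ResNet downsampling:
# the stem reduces the input by 4x and each later stage halves it, so the last
# feature map is image_size // 32. A tiny pure-Python sketch of that bookkeeping:
def resnet_stage_sizes(image_size, num_stages=4):
    size = image_size // 4            # stem: stride-2 conv + stride-2 pool
    sizes = [size]
    for _ in range(num_stages - 1):   # each subsequent stage halves H and W
        size //= 2
        sizes.append(size)
    return sizes
assert resnet_stage_sizes(32) == [8, 4, 2, 1]  # 32 // 32 == 1 at the final stage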
| 677 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
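# Editor's sketch of the pattern above, much simplified relative to the real
# transformers._LazyModule: attribute access triggers the actual submodule import.
import importlib
import types
class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}
    def __getattr__(self, name):
        submodule = self._symbol_to_module.get(name)
        if submodule is None:
            raise AttributeError(name)
        module = importlib.import_module("." + submodule, self.__name__)
        return getattr(module, name)
# usage mirrors the last line above: sys.modules[__name__] = MiniLazyModule(__name__, _import_structure)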
| 677 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path) -> Tuple:
def get_masked_lm_array(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[Any] = F"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_lowercase : Tuple = tf.train.load_variable(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if "kernel" in name:
_lowercase : Tuple = array.transpose()
return torch.from_numpy(SCREAMING_SNAKE_CASE )
def get_encoder_array(SCREAMING_SNAKE_CASE ):
_lowercase : Any = F"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_lowercase : List[str] = tf.train.load_variable(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if "kernel" in name:
_lowercase : Dict = array.transpose()
return torch.from_numpy(SCREAMING_SNAKE_CASE )
def get_encoder_layer_array(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : Tuple = F"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_lowercase : Optional[int] = tf.train.load_variable(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if "kernel" in name:
_lowercase : List[str] = array.transpose()
return torch.from_numpy(SCREAMING_SNAKE_CASE )
def get_encoder_attention_layer_array(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_lowercase : int = F"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_lowercase : Optional[int] = tf.train.load_variable(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : List[str] = array.reshape(SCREAMING_SNAKE_CASE )
if "kernel" in name:
_lowercase : Union[str, Any] = array.transpose()
return torch.from_numpy(SCREAMING_SNAKE_CASE )
print(F"""Loading model based on config from {config_path}...""" )
_lowercase : List[Any] = BertConfig.from_json_file(SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = BertForMaskedLM(SCREAMING_SNAKE_CASE )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
_lowercase : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
_lowercase : BertSelfAttention = layer.attention.self
_lowercase : Any = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape )
_lowercase : Any = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape )
_lowercase : Optional[Any] = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape )
_lowercase : Tuple = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape )
_lowercase : Optional[int] = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape )
_lowercase : Tuple = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
_lowercase : BertSelfOutput = layer.attention.output
_lowercase : List[Any] = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape )
_lowercase : str = get_encoder_attention_layer_array(
SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape )
_lowercase : Union[str, Any] = get_encoder_layer_array(SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' )
_lowercase : int = get_encoder_layer_array(SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' )
# Intermediate
_lowercase : BertIntermediate = layer.intermediate
_lowercase : Any = get_encoder_layer_array(SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' )
_lowercase : Optional[Any] = get_encoder_layer_array(SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' )
# Output
_lowercase : BertOutput = layer.output
_lowercase : int = get_encoder_layer_array(SCREAMING_SNAKE_CASE , '_output_dense/kernel' )
_lowercase : int = get_encoder_layer_array(SCREAMING_SNAKE_CASE , '_output_dense/bias' )
_lowercase : Any = get_encoder_layer_array(SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' )
_lowercase : Optional[int] = get_encoder_layer_array(SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' )
# Embeddings
_lowercase : List[str] = get_encoder_array('_position_embedding_layer/embeddings' )
_lowercase : int = get_encoder_array('_type_embedding_layer/embeddings' )
_lowercase : Union[str, Any] = get_encoder_array('_embedding_norm_layer/gamma' )
_lowercase : List[Any] = get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
_lowercase : Dict = model.cls.predictions.transform
_lowercase : Dict = get_masked_lm_array('dense/kernel' )
_lowercase : Any = get_masked_lm_array('dense/bias' )
_lowercase : Any = get_masked_lm_array('layer_norm/gamma' )
_lowercase : Dict = get_masked_lm_array('layer_norm/beta' )
_lowercase : Optional[Any] = get_masked_lm_array('embedding_table' )
# Pooling
_lowercase : str = BertPooler(config=SCREAMING_SNAKE_CASE )
_lowercase : BertPooler = get_encoder_array('_pooler_layer/kernel' )
_lowercase : BertPooler = get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(SCREAMING_SNAKE_CASE )
# Integration test - should load without any errors ;)
_lowercase : Union[str, Any] = BertForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE )
print(new_model.eval() )
print('Model conversion was done sucessfully!' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
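# Editor's illustration of why the loaders above transpose "kernel" arrays:
# TF stores dense kernels as (in_features, out_features) while torch.nn.Linear
# keeps weight as (out_features, in_features). Small self-contained check:
import numpy as np
import torch
tf_kernel = np.random.randn(4, 8).astype(np.float32)         # TF layout: (in, out)
linear = torch.nn.Linear(4, 8)
linear.weight.data = torch.from_numpy(tf_kernel.transpose())  # torch layout: (out, in)
x = torch.randn(1, 4)
expected = torch.from_numpy(x.numpy() @ tf_kernel)            # TF-style x @ W
assert torch.allclose(linear(x) - linear.bias, expected, atol=1e-5)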
| 677 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : str = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ElectraTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
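# Editor's tokenizer-free sketch of the pair layout produced by the two methods
# above: [CLS] A [SEP] B [SEP], with segment ids 0 for the first sequence (and
# its special tokens) and 1 for the second. Ids 101/102 are the usual
# BERT-family CLS/SEP ids, used here only as placeholders.
CLS_ID, SEP_ID = 101, 102
def build_pair(ids_a, ids_b=None):
    tokens = [CLS_ID] + ids_a + [SEP_ID]
    type_ids = [0] * len(tokens)
    if ids_b is not None:
        tokens += ids_b + [SEP_ID]
        type_ids += [1] * (len(ids_b) + 1)
    return tokens, type_ids
assert build_pair([7, 8], [9]) == ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])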
| 677 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(' ' + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
# print entropy
print(F"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha0 in my_alphas:
        for cha1 in my_alphas:
            sequence = cha0 + cha1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
# print second entropy
print(F"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
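# Editor's compact restatement of the first-order entropy computed above:
# H = -sum_c p(c) * log2(p(c)) over the character frequencies of the text.
def char_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum((n / total) * math.log2(n / total) for n in counts.values())
assert round(char_entropy("abab"), 6) == 1.0  # two equiprobable symbols -> 1 bit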
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ):
_lowercase : List[str] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : str = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Optional[Any] = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Tuple = type_sequence_label_size
_lowercase : Dict = initializer_range
_lowercase : List[Any] = num_labels
_lowercase : List[str] = num_choices
_lowercase : Dict = scope
_lowercase : List[Any] = range_bbox
def __a ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowercase : List[str] = bbox[i, j, 3]
_lowercase : Optional[int] = bbox[i, j, 1]
_lowercase : int = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowercase : Dict = bbox[i, j, 2]
_lowercase : Dict = bbox[i, j, 0]
_lowercase : int = t
_lowercase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase )
_lowercase : Any = None
if self.use_input_mask:
_lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : Tuple = None
if self.use_token_type_ids:
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Tuple = None
_lowercase : Union[str, Any] = None
_lowercase : List[str] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Any = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMModel(config=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase , _lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=_lowerCAmelCase )
_lowercase : Any = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : str = self.num_labels
_lowercase : Tuple = TFLayoutLMForSequenceClassification(config=_lowerCAmelCase )
_lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = self.num_labels
_lowercase : Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : str = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : str = False
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = 10
def __a ( self ):
_lowercase : Optional[int] = TFLayoutLMModelTester(self )
_lowercase : str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
@slow
def __a ( self ):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = TFLayoutLMModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip('Onnx compliancy broke with TF 2.10' )
def __a ( self ):
pass
def prepare_layoutlm_batch_inputs() -> Optional[int]:
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
_lowercase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
_lowercase : Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
_lowercase : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
_lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
_lowercase : Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : Tuple = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Tuple = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the sequence output on [0, :3, :3]
_lowercase : Optional[Any] = tf.convert_to_tensor(
[[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-3 ) )
# test the pooled output on [1, :3]
_lowercase : Optional[int] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCAmelCase , atol=1E-3 ) )
@slow
def __a ( self ):
# initialize model with randomly initialized sequence classification head
_lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Any = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , )
# test whether we get a loss as a scalar
_lowercase : List[Any] = outputs.loss
_lowercase : Any = (2,)
self.assertEqual(loss.shape , _lowerCAmelCase )
# test the shape of the logits
_lowercase : str = outputs.logits
_lowercase : Dict = (2, 2)
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
# initialize model with randomly initialized token classification head
_lowercase : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : Dict = model(
input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Dict = outputs.logits
_lowercase : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape , _lowerCAmelCase )
@slow
def __a ( self ):
# initialize model with randomly initialized token classification head
_lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' )
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
_lowercase : int = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
# test the shape of the logits
_lowercase : Any = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape , _lowerCAmelCase )
self.assertEqual(outputs.end_logits.shape , _lowerCAmelCase )
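# Editor's vectorized equivalent of the per-element bbox fix-up in the model
# tester above: enforce x0 <= x1 and y0 <= y1 by sorting each coordinate pair.
import numpy as np
def make_bbox_legal(bbox):
    # bbox: (..., 4) boxes laid out as (x0, y0, x1, y1)
    xs = np.sort(bbox[..., [0, 2]], axis=-1)
    ys = np.sort(bbox[..., [1, 3]], axis=-1)
    return np.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], axis=-1)
assert (make_bbox_legal(np.array([[5, 9, 2, 3]])) == np.array([[2, 3, 5, 9]])).all()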
| 677 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
for attribute in key.split('.' ):
_lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
_lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
_lowercase : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
_lowercase : List[str] = value
elif weight_type == "weight_g":
_lowercase : Any = value
elif weight_type == "weight_v":
_lowercase : Tuple = value
elif weight_type == "bias":
_lowercase : List[str] = value
else:
_lowercase : Dict = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : Optional[int] = []
_lowercase : Optional[int] = fairseq_model.state_dict()
_lowercase : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_lowercase : Dict = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
_lowercase : int = True
else:
for key, mapped_key in MAPPING.items():
_lowercase : Union[str, Any] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
_lowercase : Union[str, Any] = True
if "*" in mapped_key:
_lowercase : Dict = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
_lowercase : Dict = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
_lowercase : Optional[int] = 'weight_g'
elif "weight_v" in name:
_lowercase : Optional[Any] = 'weight_v'
elif "weight" in name:
_lowercase : str = 'weight'
elif "bias" in name:
_lowercase : Any = 'bias'
else:
_lowercase : str = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
_lowercase : Any = full_name.split('conv_layers.' )[-1]
_lowercase : Any = name.split('.' )
_lowercase : Optional[Any] = int(items[0] )
_lowercase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
_lowercase : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
_lowercase : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
_lowercase : Union[str, Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
_lowercase : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True) -> Optional[Any]:
if config_path is not None:
_lowercase : Optional[int] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = HubertConfig()
if is_finetuned:
if dict_path:
_lowercase : List[str] = Dictionary.load(SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowercase : Dict = target_dict.pad_index
_lowercase : Dict = target_dict.bos_index
_lowercase : Tuple = target_dict.eos_index
_lowercase : List[Any] = len(target_dict.symbols )
_lowercase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) )
return
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE )
_lowercase : int = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , )
_lowercase : str = True if config.feat_extract_norm == 'layer' else False
_lowercase : Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
_lowercase : Tuple = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = HubertForCTC(SCREAMING_SNAKE_CASE )
else:
_lowercase : List[Any] = HubertModel(SCREAMING_SNAKE_CASE )
if is_finetuned:
_lowercase , _lowercase , _lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_lowercase , _lowercase , _lowercase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowercase : int = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
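# Editor's sketch of the "*" substitution used by recursively_load_weights
# above: the layer index parsed out of the fairseq parameter name replaces the
# wildcard in the corresponding Hugging Face key.
def expand_mapped_key(mapped_key, fairseq_name, fairseq_key):
    if "*" in mapped_key:
        layer_index = fairseq_name.split(fairseq_key)[0].split(".")[-2]
        mapped_key = mapped_key.replace("*", layer_index)
    return mapped_key
assert expand_mapped_key(
    "encoder.layers.*.attention.k_proj",
    "encoder.layers.3.self_attn.k_proj.weight",
    "self_attn.k_proj",
) == "encoder.layers.3.attention.k_proj"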
| 677 | 1 |
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size):
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update
    def left(self, idx):
        return idx * 2
    def right(self, idx):
        return idx * 2 + 1
    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
    def update(self, idx, left_element, right_element, a, b, val):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True
    def query(self, idx, left_element, right_element, a, b):
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)
    def __str__(self):
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
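    # Editor's cross-check: the lazy tree should agree with a brute-force list
    # under the same range assignments and max query.
    ref = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    ref[0:3] = [111] * 3   # mirrors segt.update(1, 1, size, 1, 3, 111)
    ref[6:8] = [235] * 2   # mirrors segt.update(1, 1, size, 7, 8, 235)
    assert segt.query(1, 1, size, 1, 15) == max(ref) == 235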
| 677 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ):
_lowercase : List[str] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : str = seq_length
_lowercase : Dict = is_training
_lowercase : Optional[int] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : Union[str, Any] = use_labels
_lowercase : Optional[Any] = vocab_size
_lowercase : Optional[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = intermediate_size
_lowercase : Optional[Any] = hidden_act
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : int = max_position_embeddings
_lowercase : str = type_vocab_size
_lowercase : Tuple = type_sequence_label_size
_lowercase : Dict = initializer_range
_lowercase : List[Any] = num_labels
_lowercase : List[str] = num_choices
_lowercase : Dict = scope
_lowercase : List[Any] = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('Onnx compliancy broke with TF 2.10')
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
# Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
# fmt: off
    input_ids = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
@slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased')
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]], )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))
@slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased', num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=tf.convert_to_tensor([1, 1]), )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)
@slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased', num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)
@slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased')
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
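
# A note (my own summary, not part of the upstream tests): LayoutLM expects each token's
# bounding box as (x0, y0, x1, y1) normalized to a 0-1000 page coordinate scale, which is
# why the fixture above caps coordinates at 1_000; a single token's box looks like
#
#   bbox = tf.convert_to_tensor([[[423, 237, 440, 251]]])  # shape (batch, seq, 4)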
| 677 | 1 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
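# For illustration (a sketch added here, not part of the tool itself), the cartesian
# product of the two dimensions above can be reproduced with itertools, mirroring what
# main() does below:
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#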
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, 'a')

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r'^.*\r', '', msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []
    # deal with critical env vars
    env_keys = ['CUDA_VISIBLE_DEVICES']
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(F"""{key}={val}""")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('/')[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''
    while len(cmd) > 0:
        current_line += F"""{cmd.pop(0)} """
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ''
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r'[\\\n]+', ' ', args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r'--output_dir\s+[^\s]+', '', args.base_cmd)
    args.base_cmd += F""" --output_dir {output_dir}"""
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r'--overwrite_output_dir\s+', '', args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys}, **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222])}, )
    result = subprocess.run(cmd, capture_output=True, text=True)
    if verbose:
        print('STDOUT', result.stdout)
        print('STDERR', result.stderr)
    # save the streams
    prefix = variation.replace(' ', '-')
    with open(Path(output_dir) / F"""log.{prefix}.stdout.txt""", 'w') as f:
        f.write(result.stdout)
    with open(Path(output_dir) / F"""log.{prefix}.stderr.txt""", 'w') as f:
        f.write(result.stderr)
    if result.returncode != 0:
        if verbose:
            print('failed')
        return {target_metric_key: nan}
    with io.open(F"""{output_dir}/all_results.json""", 'r', encoding='utf-8') as f:
        metrics = json.load(f)
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose, ):
    results = []
    metrics = []
    preamble = F"""{id}: {variation:<{longest_variation_len}}"""
    outcome = F"""{preamble}: """
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose)
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = F"""\33[2K\r{outcome}"""
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = F"""{outcome} {mean_target}"""
        if len(results) > 1:
            results_str += F""" {tuple(round(x, 2) for x in results)}"""
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device('cuda'))
return F"""
Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = 'variation'
    diff_key = 'diff_%'
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0, axis='columns', )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis='columns')  # reorder cols
    # capitalize
    df = df.rename(str.capitalize, axis='columns')
    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace('_', '<br>'), axis='columns')
    df_console = df.rename(lambda c: c.replace('_', '\n'), axis='columns')
    report = ['', 'Copy between the cut-here-lines and paste as is to github or a forum']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt='.2f')]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt='.2f')]
    print('\n\n'.join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base-cmd', default=None, type=str, required=True, help='Base cmd', )
    parser.add_argument(
        '--variations', default=None, type=str, nargs='+', required=True, help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'', )
    parser.add_argument(
        '--base-variation', default=None, type=str, help='Baseline variation to compare to. if None the minimal target value will be used to compare against', )
    parser.add_argument(
        '--target-metric-key', default=None, type=str, required=True, help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second', )
    parser.add_argument(
        '--report-metric-keys', default='', type=str, help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples', )
    parser.add_argument(
        '--repeat-times', default=1, type=int, help='How many times to re-run each variation - an average will be reported', )
    parser.add_argument(
        '--output_dir', default='output_benchmark', type=str, help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked', )
    parser.add_argument(
        '--verbose', default=False, action='store_true', help='Whether to show the outputs of each run or just the benchmark progress', )
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)
    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r'\|', x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(' '.join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = F"""benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"""
    print(F"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""")
    print(F"""and this script's output is also piped into {report_fn}""")
    sys.stdout = Tee(report_fn)
    print(F"""\n*** Running {len(variations)} benchmarks:""")
    print(F"""Base command: {' '.join(base_cmd)}""")
    variation_key = 'variation'
    results = []
    for id, variation in enumerate(tqdm(variations, desc='Total completion: ', leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1, cmd, variation_key, variation, longest_variation_len, args.target_metric_key, report_metric_keys, args.repeat_times, output_dir, args.verbose, ))
    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 677 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()
        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + '\n')
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, '')
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + '\n')
        # restore to the original level
        logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY='error' )
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger('transformers.models.bart.tokenization_bart')
        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level, current_level, F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""", )
        # restore to the original level
        os.environ['TRANSFORMERS_VERBOSITY'] = ''
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart')
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error', cl.out)
        # no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, '')
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + '\n')
def test_set_progress_bar_enabled():
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
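

# A minimal usage sketch (my own addition, not part of the tests) of the verbosity API
# exercised above:
#
#   from transformers import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger("transformers")
#   logger.info("visible at INFO level")
#   logging.set_verbosity_error()  # silences warnings/info for all transformers.* loggers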
| 677 | 1 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
class EMAModel:
    def __init__(self, parameters, decay=0.9999, min_decay=0.0, update_after_step=0, use_ema_warmup=False, inv_gamma=1.0, power=2 / 3, model_cls=None, model_config=None, **kwargs, ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
                'Please pass the parameters of the module instead.'
            )
            deprecate(
                'passing a `torch.nn.Module` to `ExponentialMovingAverage`', '1.0.0', deprecation_message, standard_warn=False, )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get('max_value', None) is not None:
            deprecation_message = 'The `max_value` argument is deprecated. Please use `decay` instead.'
            deprecate('max_value', '1.0.0', deprecation_message, standard_warn=False)
            decay = kwargs['max_value']
        if kwargs.get('min_value', None) is not None:
            deprecation_message = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
            deprecate('min_value', '1.0.0', deprecation_message, standard_warn=False)
            min_decay = kwargs['min_value']
        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get('device', None) is not None:
            deprecation_message = 'The `device` argument is deprecated. Please use `to` instead.'
            deprecate('device', '1.0.0', deprecation_message, standard_warn=False)
            self.to(device=kwargs['device'])
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
@classmethod
    def from_pretrained(cls, path, model_cls):
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.')
        if self.model_config is None:
            raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.')
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop('shadow_params', None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step):
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
@torch.no_grad()
    def step(self, parameters):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
                'Please pass the parameters of the module instead.'
            )
            deprecate(
                'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`', '1.0.0', deprecation_message, standard_warn=False, )
            parameters = parameters.parameters()
        parameters = list(parameters)
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters):
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)
    def to(self, device=None, dtype=None):
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store(self, parameters):
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore(self, parameters):
        if self.temp_stored_params is None:
            raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`')
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict):
        state_dict = copy.deepcopy(state_dict)
        self.decay = state_dict.get('decay', self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError('Decay must be between 0 and 1')
        self.min_decay = state_dict.get('min_decay', self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError('Invalid min_decay')
        self.optimization_step = state_dict.get('optimization_step', self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError('Invalid optimization_step')
        self.update_after_step = state_dict.get('update_after_step', self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError('Invalid update_after_step')
        self.use_ema_warmup = state_dict.get('use_ema_warmup', self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError('Invalid use_ema_warmup')
        self.inv_gamma = state_dict.get('inv_gamma', self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError('Invalid inv_gamma')
        self.power = state_dict.get('power', self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError('Invalid power')
        shadow_params = state_dict.get('shadow_params', None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError('shadow_params must be a list')
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError('shadow_params must all be Tensors')
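

# A minimal usage sketch (assumed training loop, not part of this module) of EMAModel:
#
#   import torch
#   model = torch.nn.Linear(4, 2)
#   ema = EMAModel(model.parameters(), decay=0.9999)
#   for _ in range(100):
#       ...                           # optimizer step that updates model's weights
#       ema.step(model.parameters())  # fold the new weights into the shadow copy
#   ema.store(model.parameters())     # stash the raw weights
#   ema.copy_to(model.parameters())   # evaluate with the EMA weights
#   ema.restore(model.parameters())   # put the raw weights back for further training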
| 677 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
UpperCamelCase = "pt"
elif is_tf_available():
UpperCamelCase = "tf"
else:
UpperCamelCase = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def perceiver_tokenizer(self):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]Unicode €.[SEP]')
        encoded = tokenizer('e è é ê ë')
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]e è é ê ë[SEP]')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['input_ids'].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [F"""<extra_id_{i}>""" for i in range(125)]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])), )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), '�')

    def test_pretokenized_inputs(self):
        pass

    def test_clean_up_tokenization_spaces(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretrained_model_lists(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
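

# A quick sketch (my own check, not part of the test suite) of the byte-level mapping the
# expected ids above rely on: Perceiver reserves 6 special-token ids, so every UTF-8 byte
# b of the input maps to id b + 6 (e.g. 'U' -> 85 + 6 = 91):
#
#   assert [b + 6 for b in "Unicode".encode("utf-8")] == [91, 116, 111, 105, 117, 106, 107]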
| 677 | 1 |
import math
from datetime import datetime, timedelta
def gauss_easter(year) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
UpperCamelCase = "will be" if year > datetime.now().year else "was"
print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
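
# A small sanity check (dates taken from published Easter tables; shown for illustration):
#
#   assert gauss_easter(2000) == datetime(2000, 4, 23)
#   assert gauss_easter(2021) == datetime(2021, 4, 4)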
| 677 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
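
# For illustration (my own note, assuming the standard transformers lazy-import pattern):
# the submodules listed in _import_structure are only imported when first accessed, so
#
#   from transformers.models.conditional_detr import ConditionalDetrConfig
#
# resolves configuration_conditional_detr lazily rather than pulling in the
# torch/vision-dependent modules eagerly.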
| 677 | 1 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
UpperCamelCase = "src/diffusers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
UpperCamelCase = "\n{0} = None\n"
UpperCamelCase = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
UpperCamelCase = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def find_backend(line):
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    with open(os.path.join(PATH_TO_DIFFUSERS, '__init__.py'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith('else:'):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(F"""\"{b}\"""" for b in backend.split('_and_')) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies ( overwrite=False ):
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS , 'utils' )
    dummy_file_paths = {
        backend: os.path.join(path , F"""dummy_{short_names.get(backend , backend )}_objects.py""" )
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path ):
            with open(file_path , 'r' , encoding='utf-8' , newline='\n' ) as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F"""Updating diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py as the main """
                    '__init__ has new objects.' )
                with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
                    f.write(dummy_files[backend] )
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    F"""diffusers.utils.dummy_{short_names.get(backend , backend )}_objects.py. Run `make fix-copies` """
                    'to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
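    # Typical invocations from the repo root (sketch):
    #   python utils/check_dummies.py                      # check only; fails if dummies are stale
    #   python utils/check_dummies.py --fix_and_overwrite  # regenerate the dummy files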
| 677 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor ( ProcessorMixin ):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        sampling_rate = kwargs.pop('sampling_rate' , None )
        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding['input_features'] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
    def batch_decode ( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode ( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names ( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
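# Usage sketch (checkpoint id and sampling rate are illustrative):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform, sampling_rate=48_000, return_tensors="pt")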
| 677 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger()
@dataclass
class Tracker :
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
    def _forward_hook ( self , m , inputs , outputs ):
        # standard forward-hook signature: (module, inputs, outputs)
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__( self , x ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self
@property
    def parametrized ( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer :
    src : nn.Module
    dest : nn.Module
    verbose : int = 0
    src_skip : List = field(default_factory=list )
    dest_skip : List = field(default_factory=list )
    def __call__( self , x ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                F"""Numbers of operations are different. Source module has {len(src_traced )} operations while"""
                F""" destination module has {len(dest_traced )}.""" )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F"""Transferred from={src_m} to={dest_m}""" )
def convert_weight_and_push ( name , config , save_directory , push_to_hub = True ):
    print(F"""Converting {name}...""" )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F"""resnet{'-'.join(name.split('resnet' ) )}"""
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=True , )
        print(F"""Pushed {checkpoint_name}""" )
def convert_weights_and_push ( save_directory , model_name = None , push_to_hub = True ):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ),
}
if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
else:
for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 677 |
from __future__ import annotations
from typing import Any
class Graph :
    def __init__( self , num_of_nodes ):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges : list[list[int]] = []
        self.m_component : dict[int, int] = {}
    def add_edge ( self , u_node , v_node , weight ):
        self.m_edges.append([u_node, v_node, weight] )
    def find_component ( self , u_node ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
    def set_component ( self , u_node ):
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union ( self , component_size , u_node , v_node ):
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka ( self ):
        component_size = []
        mst_weight = 0
        minimum_weight_edge : list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def __magic_name__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677 | 1 |
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path ( graph , start , goal ) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
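# Runs in O(V + E): every node is expanded at most once and every edge spawns at
# most one new path. On demo_graph, the shortest G -> D route is G, C, A, B, D.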
def bfs_shortest_path_distance ( graph , start , target ) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 677 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize ( example ):
    output = {}
    output['input_ids'] = tokenizer(example['content'] , truncation=SCREAMING_SNAKE_CASE )['input_ids']
_lowercase : List[str] = len(example['content'] ) / len(output['input_ids'] )
return output
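# Sketch of the intended per-example output (the ratio key name is an assumption):
#   {"input_ids": [...], "ratio_char_token": len(example["content"]) / len(output["input_ids"])}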
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 677 | 1 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def __magic_name__ ( SCREAMING_SNAKE_CASE = "" ) -> dict[str, float]:
_lowercase : Any = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
_lowercase : List[str] = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE ).text , 'html.parser' )
_lowercase : str = soup.find_all('td' , attrs='titleColumn' )
_lowercase : List[str] = soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
}
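# Example shape of the returned mapping (ratings drift over time; values are illustrative):
#   {"The Shawshank Redemption": 9.3, "The Godfather": 9.2, ...}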
def __magic_name__ ( SCREAMING_SNAKE_CASE = "IMDb_Top_250_Movies.csv" ) -> None:
_lowercase : int = get_imdb_top_aaa_movies()
with open(SCREAMING_SNAKE_CASE , 'w' , newline='' ) as out_file:
_lowercase : Dict = csv.writer(SCREAMING_SNAKE_CASE )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 677 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
UpperCamelCase = {"facebook/bart-base": BartForConditionalGeneration}
UpperCamelCase = {"facebook/bart-base": BartTokenizer}
def parse_args ( ):
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
    parser.add_argument(
        '--validation_file' , type=str , default=None , help='A csv or a json file containing the validation data.' )
    parser.add_argument(
        '--max_length' , type=int , default=5 , help='The maximum total input sequence length after tokenization.' , )
    parser.add_argument(
        '--num_beams' , type=int , default=None , help=(
            'Number of beams to use for evaluation. This argument will be '
            'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
        ) , )
    parser.add_argument(
        '--model_name_or_path' , type=str , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=True , )
    parser.add_argument(
        '--config_name' , type=str , default=None , help='Pretrained config name or path if not the same as model_name' , )
    parser.add_argument(
        '--device' , type=str , default='cpu' , help='Device where the model will be run' , )
    parser.add_argument('--output_file_path' , type=str , default=None , help='Where to store the final ONNX file.' )
    args = parser.parse_args()
    return args
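# Example invocation (the script filename is hypothetical):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path bart_beam_search.onnx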
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" ) -> List[Any]:
_lowercase : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
_lowercase : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE )
if model_name in ["facebook/bart-base"]:
_lowercase : Dict = 0
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = 0
return huggingface_model, tokenizer
def export_and_validate_model ( model , tokenizer , onnx_file_path , num_beams , max_length ):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
        summary_ids = model.generate(
            inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            } , example_outputs=summary_ids , )
        logger.info('Model exported to {}'.format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info('Deduplicated and optimized model written to {}'.format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams ),
                'max_length': np.array(max_length ),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
        logger.info('Model outputs from torch and ONNX Runtime are similar.' )
        logger.info('Success.' )
def main ( ):
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model, tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'
    logger.info('Exporting model to ONNX' )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )
if __name__ == "__main__":
main()
| 677 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["ConditionalDetrFeatureExtractor"]
UpperCamelCase = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
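# Lazy-import usage sketch (the parent package path is an assumption about where
# this __init__ lives):
#   from transformers.models.conditional_detr import ConditionalDetrConfig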
| 677 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCamelCase : List[Any] = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = False
    def _prepare_for_class ( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['next_sentence_label'] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
    class TFMobileBertModelTester ( object ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowercase : Optional[Any] = parent
_lowercase : str = batch_size
_lowercase : Optional[int] = seq_length
_lowercase : Tuple = is_training
_lowercase : List[Any] = use_input_mask
_lowercase : Optional[Any] = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : str = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[int] = intermediate_size
_lowercase : Tuple = hidden_act
_lowercase : Dict = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Tuple = max_position_embeddings
_lowercase : List[str] = type_vocab_size
_lowercase : Optional[Any] = type_sequence_label_size
_lowercase : List[Any] = initializer_range
_lowercase : List[str] = num_labels
_lowercase : Union[str, Any] = num_choices
_lowercase : List[str] = scope
_lowercase : Union[str, Any] = embedding_size
    def prepare_config_and_inputs ( self ):
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Optional[int] = None
if self.use_input_mask:
_lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : int = None
if self.use_token_type_ids:
_lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : Dict = None
_lowercase : Any = None
_lowercase : int = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_mobilebert_model ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
_lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase )
_lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
_lowercase : Tuple = [input_ids, input_mask]
_lowercase : str = model(_lowerCAmelCase )
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
_lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase )
_lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
_lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Optional[int] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
_lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase )
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_sequence_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
_lowercase : Optional[int] = self.num_labels
_lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase )
_lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
_lowercase : Optional[Any] = self.num_choices
_lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
_lowercase : str = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_lowercase : Union[str, Any] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_mobilebert_for_token_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
_lowercase : List[str] = self.num_labels
_lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase )
_lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : List[str] = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_question_answering ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
_lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase )
_lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowercase : int = model(_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
    def setUp ( self ):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=3_7 )
def __a ( self ):
self.config_tester.run_common_tests()
def __a ( self ):
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase )
def __a ( self ):
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase )
def __a ( self ):
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase )
def __a ( self ):
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase )
@slow
def __a ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_lowercase : List[str] = TFMobileBertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
        model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 3_0_5_2_2]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 677 | 1 |
def solution ( n = 100 ) -> int:
    collect_powers = set()
    current_pow = 0
    N = n + 1 # maximum limit
    for a in range(2 , N ):
        for b in range(2 , N ):
            current_pow = a**b # calculates the current power
            collect_powers.add(current_pow ) # adds the result to the set
    return len(collect_powers )
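# Sanity check: solution(5) == 15, the count of distinct a**b for 2 <= a, b <= 5.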
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 677 |
import qiskit
def single_qubit_measure ( qubits , classical_bits ) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
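# On the ideal (noise-free) simulator both qubits are flipped to |1>, so the
# returned histogram is deterministic: {'11': 1000}.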
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
| 677 | 1 |
def greatest_common_divisor ( a , b ) -> int:
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )
def gcd_by_iterative ( x , y ) -> int:
    while y: # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x )
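# Both variants agree, e.g. greatest_common_divisor(24, 40) == gcd_by_iterative(24, 40) == 8.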
def main ( ):
try:
_lowercase : int = input('Enter two integers separated by comma (,): ' ).split(',' )
_lowercase : Union[str, Any] = int(nums[0] )
_lowercase : int = int(nums[1] )
print(
F"""greatest_common_divisor({num_a}, {num_a}) = """
F"""{greatest_common_divisor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}""" )
print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}""" )
except (IndexError, UnboundLocalError, ValueError):
print('Wrong input' )
if __name__ == "__main__":
main()
| 677 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
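# Any mask not supplied above is filled in by the guards: attention masks from
# pad-token positions, head masks as all-ones defaults.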
class FlaxBlenderbotSmallModelTester :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ):
_lowercase : List[str] = parent
_lowercase : List[Any] = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Tuple = use_labels
_lowercase : Dict = vocab_size
_lowercase : Any = hidden_size
_lowercase : Optional[Any] = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = intermediate_size
_lowercase : Any = hidden_act
_lowercase : Optional[Any] = hidden_dropout_prob
_lowercase : Tuple = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : str = eos_token_id
_lowercase : int = pad_token_id
_lowercase : Tuple = bos_token_id
_lowercase : List[Any] = initializer_range
    def prepare_config_and_inputs ( self ):
_lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , )
_lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, inputs_dict
    def prepare_config_and_inputs_for_common ( self ):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward ( self , model_class_name , config , inputs_dict ):
_lowercase : Optional[Any] = 2_0
_lowercase : List[Any] = model_class_name(_lowerCAmelCase )
_lowercase : List[Any] = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
_lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : int = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , )
_lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask ( self , model_class_name , config , inputs_dict ):
_lowercase : Dict = 2_0
_lowercase : Any = model_class_name(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
_lowercase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowercase : List[Any] = model.decode(
decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , )
_lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase )
_lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
    vocab_size = 99
    def _get_config_and_data ( self ):
_lowercase : Dict = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_lowercase : Union[str, Any] = input_ids.shape[0]
_lowercase : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __a ( self ):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape , expected_shape )
def __a ( self ):
_lowercase : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase )
_lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase )
_lowercase : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _lowerCAmelCase )
def __a ( self ):
_lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
_lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 )
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
_lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
_UpperCamelCase : int = True
_UpperCamelCase : Any = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
_UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp ( self ):
        self.model_tester = FlaxBlenderbotSmallModelTester(self )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __a ( self ):
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = model_class(_lowerCAmelCase )
@jax.jit
def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ):
return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __a ( self ):
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowercase : int = model_class(_lowerCAmelCase )
_lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_lowercase : List[Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
return model.decode(
decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , )
with self.subTest('JIT Enabled' ):
_lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __a ( self ):
for model_class_name in self.all_model_classes:
_lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id
_lowercase : int = model(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
| 677 | 1 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = "▁"
UpperCamelCase = {"vocab_file": "prophetnet.tokenizer"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
),
}
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab ( vocab_file ):
    vocab = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase = None , **_lowerCAmelCase , ):
_lowercase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
                'You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
_lowercase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCAmelCase ) )
_lowercase : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
_lowercase : Tuple = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(1_0 ):
_lowercase : Union[str, Any] = F"""[unused{i}]"""
_lowercase : Dict = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
_lowercase : Optional[Any] = 1_2
_lowercase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_lowerCAmelCase )
def __getstate__( self ):
_lowercase : Tuple = self.__dict__.copy()
_lowercase : Optional[int] = None
return state
def __setstate__( self , _lowerCAmelCase ):
_lowercase : Optional[Any] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
                'You need to install SentencePiece to use XLMProphetNetTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_lowercase : List[Any] = {}
_lowercase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is None:
return ([0] * len(_lowerCAmelCase )) + [1]
return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
sep = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __a ( self ):
return len(self.sp_model ) + self.fairseq_offset
def __a ( self ):
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self , _lowerCAmelCase ):
return self.sp_model.encode(_lowerCAmelCase , out_type=str )
def __a ( self , _lowerCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(_lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __a ( self , _lowerCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __a ( self , _lowerCAmelCase ):
out_string = ''.join(_lowerCAmelCase ).replace('▁' , ' ' ).strip()  # '▁' is the SentencePiece underline
return out_string
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
out_vocab_file = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , 'wb' ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
sep = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
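# Hedged illustration (ids are hypothetical): with sep_token_id == 2, the method
# above yields [5, 6, 7, 2] for a single sequence [5, 6, 7], and
# [5, 6, 2, 8, 9, 2] for the pair [5, 6] / [8, 9].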
| 677 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Dict = "longformer"
def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ):
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase )
_lowercase : Optional[int] = attention_window
_lowercase : str = sep_token_id
_lowercase : Optional[Any] = bos_token_id
_lowercase : List[Any] = eos_token_id
_lowercase : Optional[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : Union[str, Any] = num_hidden_layers
_lowercase : Optional[int] = num_attention_heads
_lowercase : List[str] = hidden_act
_lowercase : List[str] = intermediate_size
_lowercase : List[Any] = hidden_dropout_prob
_lowercase : str = attention_probs_dropout_prob
_lowercase : Any = max_position_embeddings
_lowercase : int = type_vocab_size
_lowercase : Optional[int] = initializer_range
_lowercase : List[Any] = layer_norm_eps
_lowercase : List[str] = onnx_export
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase.onnx_export = True
@property
def __a ( self ):
if self.task == "multiple-choice":
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def __a ( self ):
outputs = super().outputs
if self.task == "default":
outputs['pooler_output'] = {0: 'batch'}
return outputs
@property
def __a ( self ):
return 1E-4
@property
def __a ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 1_4 )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ):
inputs = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
inputs['global_attention_mask'] = torch.zeros_like(inputs['input_ids'] )
# make every second token global
inputs['global_attention_mask'][:, ::2] = 1
return inputs
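# A minimal sketch of the pattern produced above, assuming only torch (shapes
# hypothetical): zeros_like gives local attention everywhere, then the slice
# mask[:, ::2] = 1 marks tokens 0, 2, 4, ... as global, so the dummy batch
# exercises both Longformer attention modes during ONNX export.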
| 677 | 1 |
import os
import time
import numpy as np
import onnxruntime as ort
UpperCamelCase = "1"
UpperCamelCase = "0"
UpperCamelCase = "1"
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print("Warm up phase...")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Start inference...")
start_time = time.time()
max_iters = 2_000
UpperCamelCase = {}
for iter in range(max_iters):
UpperCamelCase = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1_000 / max_iters))
| 677 |
from __future__ import annotations
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool:
return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )
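# Hedged examples of the distinctness check above:
#   __magic_name__([1, 2, 3]) -> True   (all elements unique)
#   __magic_name__([1, 2, 2]) -> False  (2 appears twice)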
if __name__ == "__main__":
import doctest
doctest.testmod()
| 677 | 1 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
UpperCamelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , ) -> Dict:
output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , use_external_data_format=SCREAMING_SNAKE_CASE , enable_onnx_checker=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , )
else:
export(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE , output_names=SCREAMING_SNAKE_CASE , dynamic_axes=SCREAMING_SNAKE_CASE , do_constant_folding=SCREAMING_SNAKE_CASE , opset_version=SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> Optional[Any]:
_lowercase : int = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
_lowercase : int = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
_lowercase : Tuple = 'cpu'
pipeline = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE , torch_dtype=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
output_path = Path(SCREAMING_SNAKE_CASE )
# TEXT ENCODER
_lowercase : Any = pipeline.text_encoder.config.max_position_embeddings
_lowercase : List[str] = pipeline.text_encoder.config.hidden_size
text_input = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=SCREAMING_SNAKE_CASE , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=SCREAMING_SNAKE_CASE , )
del pipeline.text_encoder
# UNET
_lowercase : Optional[Any] = pipeline.unet.config.in_channels
_lowercase : Any = pipeline.unet.config.sample_size
unet_path = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
torch.randn(2 ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
torch.randn(2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
False,
) , output_path=SCREAMING_SNAKE_CASE , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=SCREAMING_SNAKE_CASE , use_external_data_format=SCREAMING_SNAKE_CASE , )
_lowercase : Any = str(unet_path.absolute().as_posix() )
_lowercase : Dict = os.path.dirname(SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = onnx.load(SCREAMING_SNAKE_CASE )
# clean up existing tensor files
shutil.rmtree(SCREAMING_SNAKE_CASE )
os.mkdir(SCREAMING_SNAKE_CASE )
# collate external tensor files into one
onnx.save_model(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , save_as_external_data=SCREAMING_SNAKE_CASE , all_tensors_to_one_file=SCREAMING_SNAKE_CASE , location='weights.pb' , convert_attribute=SCREAMING_SNAKE_CASE , )
del pipeline.unet
# VAE ENCODER
vae_encoder = pipeline.vae
_lowercase : Dict = vae_encoder.config.in_channels
_lowercase : Optional[int] = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
vae_encoder.forward = lambda sample , return_dict : vae_encoder.encode(sample , return_dict )[0].sample()
onnx_export(
SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=SCREAMING_SNAKE_CASE , )
# VAE DECODER
vae_decoder = pipeline.vae
_lowercase : int = vae_decoder.config.latent_channels
_lowercase : Optional[Any] = vae_decoder.config.out_channels
# forward only through the decoder part
vae_decoder.forward = vae_encoder.decode
onnx_export(
SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=SCREAMING_SNAKE_CASE , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
safety_checker = pipeline.safety_checker
_lowercase : Optional[Any] = safety_checker.config.vision_config.num_channels
_lowercase : Any = safety_checker.config.vision_config.image_size
safety_checker.forward = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
torch.randn(1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).to(device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=SCREAMING_SNAKE_CASE , )
del pipeline.safety_checker
safety_checker = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
feature_extractor = pipeline.feature_extractor
else:
safety_checker = None
feature_extractor = None
onnx_pipeline = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
print('ONNX pipeline saved to' , SCREAMING_SNAKE_CASE )
del pipeline
del onnx_pipeline
_lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
UpperCamelCase = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
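# Hedged usage sketch (the script filename and model id are hypothetical):
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx --opset 14 --fp16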
| 677 |
import math
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 ) -> list:
end = end or len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
temp_index = i
temp_index_value = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
array[temp_index] = array[temp_index - 1]
temp_index -= 1
array[temp_index] = temp_index_value
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: # Max Heap
largest = index
left_index = 2 * index + 1 # Left Node
right_index = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
largest = left_index
if right_index < heap_size and array[largest] < array[right_index]:
largest = right_index
if largest != index:
array[index], array[largest] = array[largest], array[index]
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
n = len(SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
array[i], array[0] = array[0], array[i]
heapify(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE )
return array
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
i = low
j = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
array[i], array[j] = array[j], array[i]
i += 1
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list:
if len(SCREAMING_SNAKE_CASE ) == 0:
return array
_lowercase : List[str] = 2 * math.ceil(math.loga(len(SCREAMING_SNAKE_CASE ) ) )
_lowercase : str = 16
return intro_sort(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(SCREAMING_SNAKE_CASE )
max_depth -= 1
pivot = median_of_a(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
p = partition(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
intro_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
end = p
return insertion_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input("Enter numbers separated by a comma : ").strip()
UpperCamelCase = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
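# Note on the hybrid above: ranges shorter than the size threshold of 16 are
# finished by insertion sort, and recursion deeper than 2 * ceil(log2(n)) levels
# falls back to heap sort, which is what keeps introsort O(n log n) worst case.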
| 677 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def __magic_name__ ( ) -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=SCREAMING_SNAKE_CASE , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=SCREAMING_SNAKE_CASE , default=4 , help='How much images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=SCREAMING_SNAKE_CASE , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=SCREAMING_SNAKE_CASE , default=0 , help='cuda_id.' , )
args = parser.parse_args()
return args
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
if not len(SCREAMING_SNAKE_CASE ) == rows * cols:
raise ValueError('The specified number of rows and columns are not correct.' )
w , h = imgs[0].size
grid = Image.new('RGB' , size=(cols * w, rows * h) )
_lowercase , _lowercase : List[Any] = grid.size
for i, img in enumerate(SCREAMING_SNAKE_CASE ):
grid.paste(SCREAMING_SNAKE_CASE , box=(i % cols * w, i // cols * h) )
return grid
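# Hedged example of the paste layout above: four images with rows=2, cols=2
# land at boxes (0, 0), (w, 0), (0, h) and (w, h) of the grid.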
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="robotic cat with wings" , SCREAMING_SNAKE_CASE=7.5 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=42 , ) -> Dict:
_lowercase : Optional[Any] = torch.Generator(pipeline.device ).manual_seed(SCREAMING_SNAKE_CASE )
images = pipeline(
SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , ).images
_rows = int(math.sqrt(SCREAMING_SNAKE_CASE ) )
grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
UpperCamelCase = parse_args()
# Load models and create wrapper for stable diffusion
UpperCamelCase = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
UpperCamelCase = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
UpperCamelCase = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
UpperCamelCase = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
UpperCamelCase = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
UpperCamelCase = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
UpperCamelCase = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, "unet", unet)
else:
UpperCamelCase = unet.to(torch.device("cuda", args.cuda_id))
UpperCamelCase = pipeline.to(unet.device)
UpperCamelCase , UpperCamelCase = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
UpperCamelCase = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 677 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["CLIPFeatureExtractor"]
UpperCamelCase = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 677 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
UpperCamelCase = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : str = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ElectraTokenizer
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ):
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
normalizer_state['lowercase'] = do_lower_case
normalizer_state['strip_accents'] = strip_accents
normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
files = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(files )
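# Hedged illustration of create_token_type_ids_from_sequences above: for a pair,
# [CLS] a1 a2 [SEP] b1 b2 [SEP] maps to token type ids [0, 0, 0, 0, 1, 1, 1].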
| 677 |
from collections.abc import Sequence
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
return sum(c * (x**i) for i, c in enumerate(SCREAMING_SNAKE_CASE ) )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
result = 0.0
for coeff in reversed(SCREAMING_SNAKE_CASE ):
result = result * x + coeff
return result
if __name__ == "__main__":
UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
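# Hedged sanity check: the naive evaluation and Horner's scheme must agree.
assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9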
| 677 | 1 |