# Residue of the dataset-viewer table header; the source rows have columns:
# code (string) | code_codestyle (int 0-371) | style_context (string) |
# style_context_codestyle (int 0-349) | label (int 0-1)
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all rotations of the string s."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Return the Burrows-Wheeler transform of s and the index of s in the sorted rotations."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations alphabetically
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverse the Burrows-Wheeler transform and return the original string."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string)."
        )

    # rebuild the sorted rotations column by column
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
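# Illustrative round trip (added, hand-checked; not part of the original file):
# the sorted rotations of "banana" end in n, n, b, a, a, a, so
#   >>> bwt_transform("banana")
#   {'bwt_string': 'nnbaaa', 'idx_original_string': 3}
#   >>> reverse_bwt("nnbaaa", 3)
#   'banana'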
# ---------------------------------------------------------------------------
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """Modified DDPM scheduler used by the unCLIP (karlo) pipelines."""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # UnCLIP does not rescale the model input
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        # spread num_inference_steps points evenly over [0, num_train_timesteps - 1]
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
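# Illustrative usage sketch (added; assumes the deobfuscated names above match
# the diffusers UnCLIPScheduler API):
#   scheduler = UnCLIPScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(25)
#   sample = torch.randn(1, 3, 64, 64)
#   for t in scheduler.timesteps:
#       model_output = torch.randn_like(sample)  # stand-in for a UNet call
#       sample = scheduler.step(model_output, t, sample).prev_sample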
# ---------------------------------------------------------------------------
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    """Configuration class for a Swinv2 model."""

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
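# Illustrative check (added): with the defaults above, the derived channel
# dimension after the last stage is embed_dim * 2**(num_stages - 1),
# i.e. 96 * 2**3 == 768:
#   config = Swinv2Config()
#   assert config.hidden_size == 768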
# ---------------------------------------------------------------------------
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return the prime numbers below max_number, using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid-integers p**q * q**p (p, q distinct primes) below base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # shrink the right pointer until p**q * q**p fits under the bound,
        # comparing in log2 space: q*log2(p) + p*log2(q) <= degree*log2(base)
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
# ---------------------------------------------------------------------------
import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)

            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    """Load the standard COCO cats test fixture."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
# ---------------------------------------------------------------------------
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Convert an official roberta-prelayernorm checkpoint into the transformers format."""
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
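# Illustrative invocation (added; the script filename and output folder are
# hypothetical, the repo id comes from the --checkpoint-repo help text above):
#   python convert_roberta_prelayernorm.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm_converted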
# ---------------------------------------------------------------------------
import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice_base = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the
        # `sum` over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice_base).max() < 1e-2
# ---------------------------------------------------------------------------
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """
    Return the Liouville lambda of number: 1 if the count of prime factors
    (with multiplicity) is even, -1 if it is odd.

    >>> liouville_lambda(10)
    1
    >>> liouville_lambda(11)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
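# Illustrative values (added): prime_factors counts factors with multiplicity,
# so liouville_lambda(4) == 1 (4 = 2*2, two factors) while
# liouville_lambda(8) == -1 (three factors).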
# ---------------------------------------------------------------------------
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__a :List[str] = TypeVar('T')
class _a ( Generic[T] ):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : T ):
A_ = data
A_ = None
def __str__( self : Union[str, Any] ):
return f'''{self.data}'''
class _a ( Generic[T] ):
"""simple docstring"""
def __init__( self : Union[str, Any] ):
A_ = None
def __iter__( self : Dict ):
A_ = self.top
while node:
yield node.data
A_ = node.next
def __str__( self : Tuple ):
return "->".join([str(UpperCAmelCase ) for item in self] )
def __len__( self : Dict ):
return len(tuple(iter(self ) ) )
def __A ( self : Optional[int] ):
return self.top is None
def __A ( self : Dict , UpperCAmelCase : T ):
A_ = Node(UpperCAmelCase )
if not self.is_empty():
A_ = self.top
A_ = node
def __A ( self : Any ):
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , UpperCAmelCase )
A_ = self.top
A_ = self.top.next
return pop_node.data
def __A ( self : Union[str, Any] ):
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def __A ( self : List[str] ):
A_ = None
if __name__ == "__main__":
from doctest import testmod
testmod()
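# Illustrative usage sketch (added; not part of the original file):
#   stack: LinkedStack[int] = LinkedStack()
#   stack.push(1)
#   stack.push(2)
#   assert str(stack) == "2->1"        # newest element is on top
#   assert stack.pop() == 2
#   assert stack.peek() == 1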
# ---------------------------------------------------------------------------
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
# ---------------------------------------------------------------------------
import gc
import random
import unittest

import torch

from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)

        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)

        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
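# Illustrative effect (added): with the _LazyModule above, a statement such as
#   from transformers.models.biogpt import BioGptConfig
# resolves configuration_biogpt only on first attribute access, so importing
# the package stays cheap when the torch-backed classes are never touched.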
# ---------------------------------------------------------------------------
import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an s3prl downstream checkpoint into the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
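# Illustrative invocation (added; the script filename and all argument values
# are hypothetical placeholders):
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model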
# ---------------------------------------------------------------------------
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached it before continuing."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()` so only one process writes."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Add each keyword argument to `os.environ` (upper-cased) and remove it again on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a pretty name from `obj`."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge the `source` dictionary into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is in use on `localhost`; defaults to the torch.distributed port 29500."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
# ---------------------------------------------------------------------------
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a Chinese-CLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
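# Illustrative usage sketch (added; the checkpoint id is assumed to be the
# public Chinese-CLIP base model):
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # batch now holds input_ids, attention_mask and pixel_values.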
# ---------------------------------------------------------------------------
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 329 | 0 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'''The elements inside the sequence must contain only {colors} values'''
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(f"{dutch_national_flag_sort(unsorted)}")
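# Worked example (added): the loop above is the classic three-way partition,
# so a single O(n) pass with O(1) extra space suffices:
#
#     >>> dutch_national_flag_sort([2, 0, 1, 2, 0, 1])
#     [0, 0, 1, 1, 2, 2]
#
# Elements equal to colors[0] are swapped behind `low`, elements equal to
# colors[2] behind `high`, and `mid` walks the unclassified middle region.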
| 355 |
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """simple docstring"""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'''{result:.9f}'''


if __name__ == "__main__":
    print(solution(20))
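# Reasoning sketch (added): by linearity of expectation, the expected number of
# distinct colours among `num_picked` drawn balls is
#
#     NUM_COLOURS * P(a given colour appears at least once)
#   = NUM_COLOURS * (1 - comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked) / comb(NUM_BALLS, num_picked))
#
# which is exactly what `solution` computes: `missing_colour / total` is the
# probability that all `num_picked` balls avoid one fixed colour.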
| 329 | 0 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """simple docstring"""
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """simple docstring"""
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """simple docstring"""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
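# Example (added): a hypothetical 4-cycle given as an adjacency matrix. The
# graph is bipartite, so 2 colours suffice, while no 1-colouring exists.
if __name__ == "__main__":
    cycle_graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(cycle_graph, 2))  # e.g. [0, 1, 0, 1]
    print(color(cycle_graph, 1))  # [] -- no valid assignment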
| 356 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = ConvBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
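# Illustration (added): for a hypothetical pair of sequences A and B, the two
# methods above produce the standard BERT-style layout
#
#     tokens:         [CLS] A [SEP] B [SEP]
#     token_type_ids:   0   0   0   1   1
#
# i.e. everything up to and including the first [SEP] belongs to segment 0 and
# the second sequence plus its closing [SEP] belongs to segment 1.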
| 329 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    """simple docstring"""

    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )
        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels, ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
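# Note (added): the allclose check above validates the key/value cache --
# feeding only the newly appended token together with `past_key_values` must
# reproduce the hidden state that a full forward pass over the concatenated
# sequence yields at that same position, up to the 1e-3 tolerance.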
@require_torch
class _a ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
_lowerCamelCase : Tuple = (TrOCRForCausalLM,) if is_torch_available() else ()
_lowerCamelCase : List[str] = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
_lowerCamelCase : Dict = True
_lowerCamelCase : Dict = False
    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
def __A ( self : Optional[int] ):
pass
def __A ( self : Union[str, Any] ):
pass
def __A ( self : int ):
pass
def __A ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def __A ( self : Any ):
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def __A ( self : List[str] ):
pass
| 357 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a :Optional[Any] = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
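# Migration sketch (added): existing call sites can usually switch over with a
# one-line change; the checkpoint name below is an assumption for the example.
#
#     from transformers import VideoMAEImageProcessor
#
#     processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")  # assumed checkpoint
#     # `processor` accepts the same keyword arguments the deprecated feature
#     # extractor forwarded via `super().__init__`.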
| 329 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__a :Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(prompt)} - but expected a single string. '''
                    "Note also that one single text can be provided for conditional image to text generation." )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''')
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True, )
            }
            records.append(record)
        return records
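# Usage sketch (added): a minimal way to drive the pipeline above; the model
# name is an assumption for the example, not fixed by this file.
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")  # assumed checkpoint
#     print(captioner("photo.png", max_new_tokens=20))
#     # -> [{'generated_text': '...'}]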
| 358 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __A ( self : List[str] ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@require_tf
def __A ( self : int ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __A ( self : Any ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __A ( self : Optional[Any] ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
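# Note (added): with the `_LazyModule` indirection above, importing
# `transformers.models.longformer` stays cheap -- a heavy submodule such as
# `modeling_longformer` is only imported the first time one of its attributes
# (e.g. `LongformerModel`) is actually accessed, following `_import_structure`.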
| 359 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """simple docstring"""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """simple docstring"""
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2E-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1E-2, eps=(1E-30, 1E-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1E-2, msg=f'''failed for {scheduler_func} in normal scheduler''', )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f'''failed for {scheduler_func} in save and reload''')
class LambdaScheduleWrapper:
    """simple docstring"""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
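# Note (added): `LambdaLR.state_dict()` only serialises `lr_lambdas` that are
# callable *objects* (plain functions and lambdas are skipped, and locally
# defined lambdas would not pickle anyway). Wrapping each lambda in the
# module-level `LambdaScheduleWrapper` above is what lets the save/reload
# round trip in `unwrap_and_save_reload_schedule` exercise that path.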
| 329 | 0 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
CORRECT_DICT = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_CORRECT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
README_CORRECT_FOUR_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
CORRECT_DICT_FOUR_LEVEL = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
README_EMPTY_YAML = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_EMPTY_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
README_NO_YAML = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_NO_YAML = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
README_INCORRECT_YAML = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_INCORRECT_YAML = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
README_MISSING_TEXT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_TEXT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
README_NONE_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
EXPECTED_ERROR_README_NONE_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
README_MISSING_SUBSECTION = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_SUBSECTION = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
README_MISSING_CONTENT = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
EXPECTED_ERROR_README_MISSING_CONTENT = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
README_MISSING_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
README_MULTIPLE_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
README_WRONG_FIRST_LEVEL = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
README_MULTIPLE_SAME_HEADING_1 = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
"readme_md, expected_dict" ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[str] ):
"""simple docstring"""
assert ReadMe.from_string(__UpperCamelCase ,__UpperCamelCase ).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
with pytest.raises(__UpperCamelCase ,match=re.escape(expected_error.format(path="root" ) ) ):
A_ = ReadMe.from_string(__UpperCamelCase ,__UpperCamelCase )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ):
"""simple docstring"""
with pytest.raises(__UpperCamelCase ,match=re.escape(expected_error.format(path="root" ) ) ):
ReadMe.from_string(__UpperCamelCase ,__UpperCamelCase )
@pytest.mark.parametrize(
"readme_md," ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
ReadMe.from_string(__UpperCamelCase ,__UpperCamelCase ,suppress_parsing_errors=__UpperCamelCase )
@pytest.mark.parametrize(
"readme_md, expected_dict" ,[
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] ,)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = Path(__UpperCamelCase ) / "README.md"
with open(__UpperCamelCase ,"w+" ) as readme_file:
readme_file.write(__UpperCamelCase )
A_ = ReadMe.from_readme(__UpperCamelCase ,__UpperCamelCase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" ,[
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] ,)
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = Path(__UpperCamelCase ) / "README.md"
with open(__UpperCamelCase ,"w+" ) as readme_file:
readme_file.write(__UpperCamelCase )
A_ = expected_error.format(path=__UpperCamelCase )
with pytest.raises(__UpperCamelCase ,match=re.escape(__UpperCamelCase ) ):
A_ = ReadMe.from_readme(__UpperCamelCase ,__UpperCamelCase )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" ,[
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] ,)
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = Path(__UpperCamelCase ) / "README.md"
with open(__UpperCamelCase ,"w+" ) as readme_file:
readme_file.write(__UpperCamelCase )
A_ = expected_error.format(path=__UpperCamelCase )
with pytest.raises(__UpperCamelCase ,match=re.escape(__UpperCamelCase ) ):
ReadMe.from_readme(__UpperCamelCase ,__UpperCamelCase )
@pytest.mark.parametrize(
"readme_md," ,[
(README_MULTIPLE_SAME_HEADING_1),
] ,)
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
A_ = Path(__UpperCamelCase ) / "README.md"
with open(__UpperCamelCase ,"w+" ) as readme_file:
readme_file.write(__UpperCamelCase )
ReadMe.from_readme(__UpperCamelCase ,__UpperCamelCase ,suppress_parsing_errors=__UpperCamelCase )
| 360 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class A:
    """simple docstring"""

    x: int
    y: str
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Dict ):
A_ = {}
A_ = []
A_ = 1
A_ = [1, 2]
A_ = {"a": 1, "b": 2}
A_ = {"a": [1, 2], "b": [3, 4]}
A_ = {"a": {"1": 1}, "b": 2}
A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
A_ = {}
A_ = []
A_ = 2
A_ = [2, 3]
A_ = {"a": 2, "b": 3}
A_ = {"a": [2, 3], "b": [4, 5]}
A_ = {"a": {"1": 2}, "b": 3}
A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
A_ = 2
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A_ = {"a": 2, "b": 0, "c": 2}
A_ = {
"a": np.eye(2 ).astype(UpperCAmelCase ),
"b": np.zeros(3 ).astype(UpperCAmelCase ),
"c": np.ones(2 ).astype(UpperCAmelCase ),
}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(ValueError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, 1, num_proc=2)
def __A ( self : List[str] ):
A_ = {"a": 1, "b": 2}
A_ = {"a": 3, "b": 4}
A_ = {"a": 5, "b": 6}
A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase )
def __A ( self : Any ):
class _a :
"""simple docstring"""
_lowerCamelCase : int = 'bar'
A_ = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    """simple docstring"""
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {f'''{i}''': i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
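# Summary of the expected behaviour (added), as encoded by the parametrization
# above: multiprocessing is only used when the iterable reaches
# `parallel_min_length` (16 here) and more than one process is requested; the
# effective pool size is additionally capped at the length of the iterable.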
class _a ( snake_case_ ):
"""simple docstring"""
@require_tf
def __A ( self : Union[str, Any] ):
import tensorflow as tf
from tensorflow.keras import layers
A_ = layers.Dense(2 )
def gen_random_output():
A_ = tf.random.uniform((1, 3) )
return model(UpperCAmelCase ).numpy()
        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@require_torch
def __A ( self : Optional[int] ):
import torch
def gen_random_output():
A_ = torch.nn.Linear(3 , 2 )
A_ = torch.rand(1 , 3 )
return model(UpperCAmelCase ).detach().numpy()
        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
def __A ( self : Any ):
def gen_random_output():
return np.random.rand(1 , 3 )
        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" ,[{}] )
def test_nested_data_structure_data(input_data):
    """simple docstring"""
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" ,[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] ,)
def test_flatten(data, expected_output):
    """simple docstring"""
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    """simple docstring"""
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    """simple docstring"""
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    """simple docstring"""
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    """simple docstring"""
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 329 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __A ( self : List[str] ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@require_tf
def __A ( self : int ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __A ( self : Any ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __A ( self : Optional[Any] ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
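# --- Usage sketch (added; illustrative, not from the original tests): the pipeline
# exercised above can be driven directly; "cats.png" is a hypothetical local file:
#
#   from transformers import pipeline
#
#   classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   preds = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])
#   # -> a list of {"score": float, "label": str} dicts, sorted by descending score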
| 361 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char( cp : int ):
    """simple docstring"""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # CJK Compatibility Ideographs Supplement
    ):
        return True
    return False
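# --- Illustrative check (added for clarity): U+4E2D ("中") falls in the first
# CJK Unified Ideographs range, while an ASCII letter does not:
#
#   assert _is_chinese_char(ord("中"))       # 0x4E2D
#   assert not _is_chinese_char(ord("a"))    # 0x61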
def is_chinese( word : str ):
    """simple docstring"""
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word( tokens : List[str] ):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol( bert_tokens : List[str] ,chinese_word_set : set ):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            max_match_len = min(end - start ,max_word_len )
            for i in range(max_match_len ,1 ,-1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 ,start + i ):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
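# --- Worked example (added for clarity): characters that continue a whole word
# from `chinese_word_set` get the "##" prefix used for whole word masking; note
# that the token list is modified in place:
#
#   tokens = ["北", "京", "欢", "迎", "你"]
#   assert add_sub_symbol(tokens, {"北京", "欢迎"}) == ["北", "##京", "欢", "##迎", "你"]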
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ):
"""simple docstring"""
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0]
A_ = [get_chinese_word(__UpperCamelCase ) for r in res]
ltp_res.extend(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase )
input_tokens.append(__UpperCamelCase )
A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase )
A_ = []
        # We only save the positions of Chinese subwords starting with "##", which marks them as part of a whole word.
for i, token in enumerate(__UpperCamelCase ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ):
ref_id.append(__UpperCamelCase )
ref_ids.append(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
return ref_ids
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    A_ = LTP(args.ltp )  # faster on a GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids]
f.writelines(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__a :Dict = parser.parse_args()
main(args)
| 329 | 0 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
_lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
_lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
_lowerCamelCase : Any = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
_lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
_lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 362 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __snake_case ( __UpperCamelCase : Features ):
"""simple docstring"""
A_ = np.inf
def set_batch_size(__UpperCamelCase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary":
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__UpperCamelCase ,__UpperCamelCase )
return None if batch_size is np.inf else batch_size
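# --- Illustrative check (added; assumes the helper above is the
# `get_writer_batch_size` called by the writer below): blob-like features shrink
# the Parquet row group size so random access stays cheap:
#
#   from datasets import Features, Image, Value
#
#   assert get_writer_batch_size(Features({"text": Value("string")})) is None
#   assert (
#       get_writer_batch_size(Features({"img": Image()}))
#       == config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS
#   )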
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ):
super().__init__(
UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , )
A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES["parquet"][1]
A_ = Parquet(
cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , )
def __A ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class _a :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ):
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self : int ):
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
return written
def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ):
A_ = 0
A_ = parquet_writer_kwargs.pop("path_or_buf" , UpperCAmelCase )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
A_ = query_table(
table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCAmelCase )
written += batch.nbytes
writer.close()
return written
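# --- Usage sketch (added; illustrative, assuming these classes correspond to
# `datasets`' ParquetDatasetReader / ParquetDatasetWriter): a simple round trip:
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   ParquetDatasetWriter(ds, "data.parquet").write()   # returns bytes written
#   ds_again = ParquetDatasetReader("data.parquet").read()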
| 329 | 0 |
import numpy as np
def runge_kutta( f ,ya ,xa ,h ,x_end ):
    """simple docstring"""
    # Classic fourth-order Runge-Kutta: integrate y' = f(x, y) from xa to x_end
    # with step size h, starting from y(xa) = ya.
    n = int(np.ceil((x_end - xa) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        ka = f(x ,y[k] )
        kb = f(x + 0.5 * h ,y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h ,y[k] + 0.5 * h * kb )
        kd = f(x + h ,y[k] + h * kc )
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
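# --- Usage sketch (added for clarity): integrating y' = y with y(0) = 1 up to
# x = 1 should approach e:
#
#   y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   assert abs(y[-1] - np.e) < 1e-6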
| 363 |
from __future__ import annotations
def make_matrix( row_size : int = 4 ) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix
def reverse_column( matrix : list[list[int]] ) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix( matrix : list[list[int]] ) -> None:
    """simple docstring"""
    for i in matrix:
        print(*i )
if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
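# --- Worked example (added for clarity): a 90 degree counterclockwise rotation
# is a transpose followed by a reversal of the row order:
#
#   m = [[1, 2], [3, 4]]
#   assert transpose(m) == [[1, 3], [2, 4]]
#   assert rotate_90(m) == [[2, 4], [1, 3]]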
| 329 | 0 |
from __future__ import annotations
def binary_search( a_list : list[int] ,item : int ) -> bool:
    """simple docstring"""
    if len(a_list ) == 0:
        return False
    midpoint = len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint] ,item )
    else:
        return binary_search(a_list[midpoint + 1 :] ,item )
if __name__ == "__main__":
__a :int = input('Enter numbers separated by comma:\n').strip()
__a :Dict = [int(item.strip()) for item in user_input.split(',')]
__a :str = int(input('Enter the number to be found in the list:\n').strip())
__a :Dict = '' if binary_search(sequence, target) else 'not '
print(F"{target} was {not_str}found in {sequence}")
| 364 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Any = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 329 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__a :Dict = False
__a :int = True
__a :Any = False
if __name__ == "__main__":
__a :Any = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__a :Union[str, Any] = parser.parse_args()
__a :int = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
__a :Dict = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
__a :Dict = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
__a :Tuple = reader.read()
__a :Optional[int] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
__a :Any = UNetaDModel(**config)
else:
__a :str = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
__a :List[str] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__a :List[str] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__a :List[str] = config[key]
del config[key]
__a :Optional[Any] = [k.replace('UNetRes', '') for k in config['down_block_types']]
__a :Union[str, Any] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
__a :int = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
__a :Optional[int] = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
__a :List[Any] = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
__a :Tuple = param_value
__a :List[Any] = True
if not has_changed:
__a :str = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 365 |
import itertools
import math
def is_prime( number : int ):
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 ,int(math.sqrt(number ) + 1 ) ,6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator( ):
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( nth : int = 1_0001 ):
    """simple docstring"""
    return next(itertools.islice(prime_generator() ,nth - 1 ,nth ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
import argparse
import os
import re
import packaging.version
__a :Union[str, Any] = 'examples/'
__a :List[str] = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
__a :Optional[Any] = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
__a :str = 'README.md'
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : str ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
A_ = f.read()
A_ , A_ = REPLACE_PATTERNS[pattern]
A_ = replace.replace("VERSION" ,__UpperCamelCase )
A_ = re_pattern.sub(__UpperCamelCase ,__UpperCamelCase )
with open(__UpperCamelCase ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.write(__UpperCamelCase )
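# --- Illustrative effect (added for clarity): with the "init" pattern and
# version "4.29.0", the call
#
#   update_version_in_file(REPLACE_FILES["init"], "4.29.0", pattern="init")
#
# rewrites the line `__version__ = "4.29.0.dev0"` in src/transformers/__init__.py
# to `__version__ = "4.29.0"`.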
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase ,pattern="examples" )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : int=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def __snake_case ( ):
"""simple docstring"""
A_ = "🤗 Transformers currently provides the following architectures"
A_ = "1. Want to contribute a new model?"
with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
A_ = f.readlines()
# Find the start of the list.
A_ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
A_ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
A_ = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" ,"https://huggingface.co/docs/transformers/model_doc" ,)
index += 1
with open(__UpperCamelCase ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(__UpperCamelCase )
def __snake_case ( ):
"""simple docstring"""
with open(REPLACE_FILES["init"] ,"r" ) as f:
A_ = f.read()
A_ = REPLACE_PATTERNS["init"][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Union[str, Any]=False ):
"""simple docstring"""
A_ = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
A_ = default_version.base_version
elif patch:
A_ = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
A_ = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
A_ = input(f'''Which version are you releasing? [{default_version}]''' )
if len(__UpperCamelCase ) == 0:
A_ = default_version
print(f'''Updating version to {version}.''' )
global_version_update(__UpperCamelCase ,patch=__UpperCamelCase )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def __snake_case ( ):
"""simple docstring"""
A_ = get_version()
A_ = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
A_ = current_version.base_version
# Check with the user we got that right.
A_ = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(__UpperCamelCase ) == 0:
A_ = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(__UpperCamelCase )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__a :List[str] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
__a :str = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 366 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 384
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = 128
A_ = 2
A_ = 9
A_ = 1
A_ = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
| 329 | 0 |
from typing import List
import numpy as np
def __snake_case ( __UpperCamelCase : dict ):
"""simple docstring"""
A_ = {key: len(__UpperCamelCase ) for key, value in gen_kwargs.items() if isinstance(__UpperCamelCase ,__UpperCamelCase )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
A_ = max(lists_lengths.values() ,default=0 )
return max(1 ,__UpperCamelCase )
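# --- Worked example (added for clarity): only list-valued kwargs count as
# sharded data sources; scalar kwargs are ignored:
#
#   assert _number_of_shards_in_gen_kwargs({"files": ["a", "b", "c"], "limit": 10}) == 3
#   assert _number_of_shards_in_gen_kwargs({"limit": 10}) == 1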
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = []
for group_idx in range(__UpperCamelCase ):
A_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
A_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
A_ = range(__UpperCamelCase ,start + num_shards_to_add )
shards_indices_per_group.append(__UpperCamelCase )
return shards_indices_per_group
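# --- Worked example (added for clarity): 5 shards split over 2 jobs puts the
# remainder on the first group:
#
#   assert _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)]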
def __snake_case ( __UpperCamelCase : dict ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = _number_of_shards_in_gen_kwargs(__UpperCamelCase )
if num_shards == 1:
return [dict(__UpperCamelCase )]
else:
A_ = _distribute_shards(num_shards=__UpperCamelCase ,max_num_jobs=__UpperCamelCase )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(__UpperCamelCase ,__UpperCamelCase )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(__UpperCamelCase ) )
]
def __snake_case ( __UpperCamelCase : List[dict] ):
"""simple docstring"""
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] ,__UpperCamelCase )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def __snake_case ( __UpperCamelCase : np.random.Generator ,__UpperCamelCase : dict ):
"""simple docstring"""
A_ = {len(__UpperCamelCase ) for value in gen_kwargs.values() if isinstance(__UpperCamelCase ,__UpperCamelCase )}
A_ = {}
for size in list_sizes:
A_ = list(range(__UpperCamelCase ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
A_ = dict(__UpperCamelCase )
for key, value in shuffled_kwargs.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = [value[i] for i in indices_per_size[len(__UpperCamelCase )]]
return shuffled_kwargs
| 367 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Dict = logging.get_logger(__name__)
__a :int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'realm'
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
# Common config
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = retriever_proj_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_candidates
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = type_vocab_size
A_ = layer_norm_eps
# Reader config
A_ = span_hidden_size
A_ = max_span_width
A_ = reader_layer_norm_eps
A_ = reader_beam_size
A_ = reader_seq_len
# Retrieval config
A_ = num_block_records
A_ = searcher_beam_size
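# --- Usage sketch (added; illustrative, assuming this class maps to
# transformers' RealmConfig): it is instantiated like any PretrainedConfig:
#
#   from transformers import RealmConfig
#
#   config = RealmConfig(num_candidates=4, reader_beam_size=3)
#   assert config.num_candidates == 4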
| 329 | 0 |
from collections.abc import Generator
def fibonacci_generator( ):
    """simple docstring"""
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution( n : int = 1000 ):
    """simple docstring"""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 368 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a :Optional[Any] = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = original_name.split("." )[0]
A_ = key.split("." )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] )
A_ = orig_block_num - offset
A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
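# --- Worked example (added for clarity; the key below is hypothetical): with an
# embedding offset of 1, block and layer numbers are pulled out of the flat key
# and renamed into the HuggingFace layout:
#
#   key = "poolformer.encoder.2.1.mlp.fc1.weight"
#   assert (
#       replace_key_with_offset(key, 1, "mlp.fc1", "output.conv1")
#       == "poolformer.encoder.block.1.1.output.conv1.weight"
#   )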
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = OrderedDict()
A_ , A_ = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
A_ = key.replace("network" ,"poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
A_ = key[: key.find("proj" )]
A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' )
A_ = key.replace("proj" ,"projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
A_ = "poolformer.encoder." + key
if "mlp.fc1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" )
if "mlp.fc2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" )
if "norm1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" )
if "norm2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" )
if "layer_scale_1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" )
if "layer_scale_2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" )
if "head" in key:
A_ = key.replace("head" ,"classifier" )
A_ = value
return new_state_dict
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return image
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = PoolFormerConfig()
# set attributes based on model_name
A_ = "huggingface/label-files"
A_ = model_name[-3:]
A_ = 1000
A_ = "imagenet-1k-id2label.json"
A_ = (1, 1000)
# set config attributes
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
if size == "s12":
A_ = [2, 2, 6, 2]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s24":
A_ = [4, 4, 12, 4]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s36":
A_ = [6, 6, 18, 6]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 1E-6
A_ = 0.9
elif size == "m36":
A_ = [6, 6, 18, 6]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
elif size == "m48":
A_ = [8, 8, 24, 8]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
# Prepare image
A_ = prepare_img()
A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) )
# rename keys
A_ = rename_keys(__UpperCamelCase )
# create HuggingFace model and load state dict
A_ = PoolFormerForImageClassification(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Define image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values
# forward pass
A_ = model(__UpperCamelCase )
A_ = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
A_ = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
A_ = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
A_ = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
A_ = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__a :int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 0 |
from collections.abc import Sequence
def max_subsequence_sum( nums : Sequence[int] | None = None ) -> int:
    """simple docstring"""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty" )
    ans = nums[0]
    for i in range(1 ,len(nums ) ):
        num = nums[i]
        ans = max(ans ,ans + num ,num )
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__a :Union[str, Any] = int(input('Enter number of elements : ').strip())
__a :Dict = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
| 369 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : torch.FloatTensor
_lowerCamelCase : Optional[torch.FloatTensor] = None
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase : int ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
A_ = []
for i in range(__UpperCamelCase ):
A_ = i / num_diffusion_timesteps
A_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) )
return torch.tensor(__UpperCamelCase ,dtype=torch.floataa )
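# --- Illustrative check (added for clarity): the "squaredcos_cap_v2" schedule
# yields betas capped at 0.999 by default, whose cumulative alphas decay
# monotonically:
#
#   betas = betas_for_alpha_bar(1000)
#   alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
#   assert betas.shape == (1000,)
#   assert float(betas.max()) <= 0.999
#   assert bool((alphas_cumprod[1:] <= alphas_cumprod[:-1]).all())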
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
A_ = betas_for_alpha_bar(UpperCAmelCase )
A_ = 1.0 - self.betas
A_ = torch.cumprod(self.alphas , dim=0 )
A_ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
A_ = 1.0
# setable values
A_ = None
A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() )
A_ = variance_type
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ):
return sample
def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ):
A_ = num_inference_steps
A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase )
def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ):
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute the predicted variance βt (see formulas (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
A_ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
A_ = self.config.variance_type
# hacks - these were probably added for training stability
if variance_type == "fixed_small_log":
A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) )
A_ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
A_ = variance.log()
A_ = beta.log()
A_ = (predicted_variance + 1) / 2
A_ = frac * max_log + (1 - frac) * min_log
return variance
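# Illustrative note on the "learned_range" branch above: the model output is mapped
# from [-1, 1] to frac = (v + 1) / 2, and the returned log-variance interpolates
# between log(beta_tilde_t) (min_log) and log(beta_t) (max_log), as in Improved DDPM.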
def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ):
A_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 )
else:
A_ = None
# 1. compute alphas, betas
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
A_ = self.alphas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
A_ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
A_ = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
A_ = torch.clamp(
UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A_ = 0
if t > 0:
A_ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device )
A_ = self._get_variance(
UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
A_ = variance
elif self.variance_type == "learned_range":
A_ = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
" for the UnCLIPScheduler." )
A_ = variance * variance_noise
A_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
A_ = timesteps.to(original_samples.device )
A_ = alphas_cumprod[timesteps] ** 0.5
A_ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_alpha_prod.unsqueeze(-1 )
A_ = (1 - alphas_cumprod[timesteps]) ** 0.5
A_ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
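# Shape/formula note for `add_noise` above: it implements the DDPM forward process
# q(x_t | x_0), i.e. noisy = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
# with both coefficients broadcast to the sample's shape via the unsqueeze loops.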
| 329 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : str = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
_lowerCamelCase : ClassVar[Features] = Features({'text': Value('string' )} )
_lowerCamelCase : ClassVar[Features] = Features({} )
_lowerCamelCase : str = "text"
@property
def __A ( self : str ):
return {self.text_column: "text"}
| 370 |
from math import isqrt, loga
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = [True] * max_number
for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 ,__UpperCamelCase ,i ):
A_ = False
return [i for i in range(2 ,__UpperCamelCase ) if is_prime[i]]
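# Quick check for the sieve above (read via its original name, as at the call
# site below): calculate_prime_numbers(10) -> [2, 3, 5, 7].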
def __snake_case ( __UpperCamelCase : int = 80_0800 ,__UpperCamelCase : int = 80_0800 ):
"""simple docstring"""
A_ = degree * loga(__UpperCamelCase )
A_ = int(__UpperCamelCase )
A_ = calculate_prime_numbers(__UpperCamelCase )
A_ = 0
A_ = 0
A_ = len(__UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
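# Reasoning sketch for the search above: the condition p**q * q**p <= base**degree
# is checked in log space as q * log2(p) + p * log2(q) <= degree * log2(base),
# which avoids huge integers; the two-pointer scan counts valid (p, q) pairs with p < q.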
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__a :List[str] = '\\n Text data.\n Second line of data.'
__a :str = 'file'
@pytest.fixture(scope="session" )
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
A_ = bytes(__UpperCamelCase ,"utf-8" )
with zstd.open(__UpperCamelCase ,"wb" ) as f:
f.write(__UpperCamelCase )
return path
@pytest.fixture
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir ,__UpperCamelCase ) ,"w" ) as f:
f.write(__UpperCamelCase )
return FILE_PATH
@pytest.mark.parametrize("compression_format" ,["gzip", "xz", "zstd"] )
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : str ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
A_ = input_paths[compression_format]
A_ = tmp_path / "cache"
A_ = DownloadConfig(cache_dir=__UpperCamelCase ,extract_compressed_file=__UpperCamelCase )
A_ = cached_path(__UpperCamelCase ,download_config=__UpperCamelCase )
with open(__UpperCamelCase ) as f:
A_ = f.read()
with open(__UpperCamelCase ) as f:
A_ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" ,[True, False] )
@pytest.mark.parametrize("default_cache_dir" ,[True, False] )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = "custom_cache"
A_ = "custom_extracted_dir"
A_ = tmp_path / "custom_extracted_path"
if default_extracted:
A_ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" ,__UpperCamelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" ,str(__UpperCamelCase ) )
A_ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
A_ = xz_file
A_ = (
DownloadConfig(extract_compressed_file=__UpperCamelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir ,extract_compressed_file=__UpperCamelCase )
)
A_ = cached_path(__UpperCamelCase ,download_config=__UpperCamelCase )
assert Path(__UpperCamelCase ).parent.parts[-2:] == expected
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = str(Path(__UpperCamelCase ).resolve() )
assert cached_path(__UpperCamelCase ) == text_file
# relative path
A_ = str(Path(__UpperCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__UpperCamelCase ) == text_file
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(__UpperCamelCase ):
cached_path(__UpperCamelCase )
# relative path
A_ = "./__missing_file__.txt"
with pytest.raises(__UpperCamelCase ):
cached_path(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(__UpperCamelCase ) as f:
A_ = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" ,__UpperCamelCase )
def __snake_case ( ):
"""simple docstring"""
with pytest.raises(__UpperCamelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__UpperCamelCase ):
http_get("https://huggingface.co" ,temp_file=__UpperCamelCase )
with pytest.raises(__UpperCamelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__UpperCamelCase ):
ftp_get("ftp://huggingface.co" ,temp_file=__UpperCamelCase )
with pytest.raises(__UpperCamelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__UpperCamelCase ):
fsspec_get("s3://huggingface.co" ,temp_file=__UpperCamelCase )
with pytest.raises(__UpperCamelCase ):
fsspec_head("s3://huggingface.co" )
| 371 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__a :str = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = RobertaPreLayerNormConfig.from_pretrained(
__UpperCamelCase ,architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A_ = torch.load(hf_hub_download(repo_id=__UpperCamelCase ,filename="pytorch_model.bin" ) )
A_ = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("roberta." ):
A_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A_ = tensor_value
A_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__UpperCamelCase ,config=__UpperCamelCase ,state_dict=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
# convert tokenizer
A_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 329 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] ):
A_ = dataset
A_ = process
A_ = params
def __len__( self : List[str] ):
return len(self.dataset )
def __getitem__( self : int , UpperCAmelCase : Tuple ):
A_ = self.dataset[i]
A_ = self.process(UpperCAmelCase , **self.params )
return processed
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=None ):
A_ = loader
A_ = infer
A_ = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
A_ = None
A_ = loader_batch_size
# Internal bookkeeping
A_ = None
A_ = None
def __len__( self : Union[str, Any] ):
return len(self.loader )
def __iter__( self : int ):
A_ = iter(self.loader )
return self
def __A ( self : int ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is a simple tensor, just fetch the slice
A_ = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
A_ = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
# Convert ModelOutput to tuple first
A_ = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
A_ = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
A_ = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCAmelCase , UpperCAmelCase ):
# Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
A_ = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
A_ = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
A_ = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take the correct batch data, but make it look like batch_size=1
# for compatibility with other methods within transformers
A_ = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take the correct batch data, but make it look like batch_size=1
# for compatibility with other methods within transformers
A_ = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
A_ = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# like batch_size=1
A_ = self._loader_batch_data.__class__(UpperCAmelCase )
self._loader_batch_index += 1
return result
def __A ( self : List[str] ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
A_ = next(self.iterator )
A_ = self.infer(UpperCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCAmelCase , torch.Tensor ):
A_ = processed
else:
A_ = list(processed.keys() )[0]
A_ = processed[key]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = len(UpperCAmelCase )
else:
A_ = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
A_ = observed_batch_size
# Setting internal index to unwrap the batch
A_ = processed
A_ = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=None ):
super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __iter__( self : Any ):
A_ = iter(self.loader )
A_ = None
return self
def __A ( self : Dict ):
if self.subiterator is None:
A_ = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
A_ = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item.
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been iterated over.
#
# Another way to look at it: we're basically flattening lists of lists
# into a single list, but with generators.
A_ = self.infer(next(self.iterator ) , **self.params )
A_ = next(self.subiterator )
return processed
class _a ( snake_case_ ):
"""simple docstring"""
def __iter__( self : List[str] ):
A_ = iter(self.loader )
return self
def __A ( self : Optional[Any] ):
# Extremely similar to PipelineIterator in its unpacking mechanism,
# BUT we have an extra required item: the presence of `is_last`.
# That is because everything is flattened by `PipelineChunkIterator`, so we
# need to keep track of how to regroup here at the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# hits an `is_last` and then just passes the accumulated items on to the caller.
A_ = False
A_ = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
A_ = self.loader_batch_item()
A_ = item.pop("is_last" )
accumulator.append(UpperCAmelCase )
if is_last:
return accumulator
while not is_last:
A_ = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCAmelCase , torch.Tensor ):
A_ = processed
else:
A_ = list(processed.keys() )[0]
A_ = processed[key]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = len(UpperCAmelCase )
else:
A_ = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
A_ = observed_batch_size
A_ = processed
A_ = 0
while self._loader_batch_index < self.loader_batch_size:
A_ = self.loader_batch_item()
A_ = item.pop("is_last" )
accumulator.append(UpperCAmelCase )
if is_last:
return accumulator
else:
A_ = processed
A_ = item.pop("is_last" )
accumulator.append(UpperCAmelCase )
return accumulator
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase : Dataset , UpperCAmelCase : str ):
A_ = dataset
A_ = key
def __len__( self : Any ):
return len(self.dataset )
def __getitem__( self : List[str] , UpperCAmelCase : Tuple ):
return self.dataset[i][self.key]
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : str , UpperCAmelCase : str ):
A_ = dataset
A_ = keya
A_ = keya
def __len__( self : List[str] ):
return len(self.dataset )
def __getitem__( self : Optional[Any] , UpperCAmelCase : Tuple ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 350 |
from maths.prime_factors import prime_factors
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = f'''Input value of [number={number}] must be an integer'''
raise TypeError(__UpperCamelCase )
if number < 1:
raise ValueError("Input must be a positive integer" )
A_ = prime_factors(__UpperCamelCase )
# mu(n) is 0 for any n that is not square-free (i.e. has a repeated prime factor)
if len(__UpperCamelCase ) != len(set(__UpperCamelCase ) ):
    return 0
return -1 if len(__UpperCamelCase ) % 2 else 1
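# Worked examples (standard Mobius values): mobius(6) == 1 since 6 = 2 * 3 has an
# even number of distinct prime factors; mobius(7) == -1 (a single prime factor);
# mobius(8) == 0 since 8 = 2**3 is not square-free.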
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Dict=2 , UpperCAmelCase : Tuple=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Any=7 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Optional[int]=99 , UpperCAmelCase : Dict=36 , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : Dict="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Any=0.02 , UpperCAmelCase : int=6 , UpperCAmelCase : Optional[int]=6 , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Any=None , UpperCAmelCase : Any=1000 , ):
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = image_size
A_ = patch_size
A_ = is_training
A_ = use_input_mask
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = coordinate_size
A_ = shape_size
A_ = num_labels
A_ = num_choices
A_ = scope
A_ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
A_ = text_seq_length
A_ = (image_size // patch_size) ** 2 + 1
A_ = self.text_seq_length + self.image_seq_length
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
A_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
A_ = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A_ = bbox[i, j, 3]
A_ = bbox[i, j, 1]
A_ = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
A_ = bbox[i, j, 2]
A_ = bbox[i, j, 0]
A_ = tmp_coordinate
A_ = tf.constant(UpperCAmelCase )
A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.text_seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
A_ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __A ( self : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : str ):
A_ = TFLayoutLMvaModel(config=UpperCAmelCase )
# text + image
A_ = model(UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
A_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , training=UpperCAmelCase , )
A_ = model(UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
A_ = model(UpperCAmelCase , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
A_ = model({"pixel_values": pixel_values} , training=UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __A ( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] ):
A_ = self.num_labels
A_ = TFLayoutLMvaForSequenceClassification(config=UpperCAmelCase )
A_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] ):
A_ = self.num_labels
A_ = TFLayoutLMvaForTokenClassification(config=UpperCAmelCase )
A_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __A ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] ):
A_ = 2
A_ = TFLayoutLMvaForQuestionAnswering(config=UpperCAmelCase )
A_ = model(
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , training=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[Any] ):
A_ = self.prepare_config_and_inputs()
((A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_)) = config_and_inputs
A_ = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : str = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Optional[int] = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_lowerCamelCase : Any = False
_lowerCamelCase : str = False
_lowerCamelCase : str = False
def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int ):
return True
def __A ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Tuple=False ):
A_ = copy.deepcopy(UpperCAmelCase )
if model_class in get_values(UpperCAmelCase ):
A_ = {
k: tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(UpperCAmelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(UpperCAmelCase ):
A_ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
A_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
A_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
A_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(UpperCAmelCase ):
A_ = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __A ( self : Union[str, Any] ):
A_ = TFLayoutLMvaModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : int ):
self.config_tester.run_common_tests()
def __A ( self : Optional[int] ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(UpperCAmelCase )
if getattr(UpperCAmelCase , "hf_compute_loss" , UpperCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
A_ = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCAmelCase )[0]
]
A_ = added_label.shape.as_list()[:1]
# Test that the model correctly computes the loss with kwargs
A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
A_ = prepared_for_class.pop("input_ids" )
A_ = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that the model correctly computes the loss when we mask some positions
A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
A_ = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
A_ = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
A_ = -100
A_ = tf.convert_to_tensor(UpperCAmelCase )
A_ = model(UpperCAmelCase , **UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that the model correctly computes the loss with a dict
A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
A_ = model(UpperCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that the model correctly computes the loss with a tuple
A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase )
# Get keys that were added with the _prepare_for_class function
A_ = prepared_for_class.keys() - inputs_dict.keys()
A_ = inspect.signature(model.call ).parameters
A_ = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
A_ = {0: "input_ids"}
for label_key in label_keys:
A_ = signature_names.index(UpperCAmelCase )
A_ = label_key
A_ = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
A_ = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
A_ = prepared_for_class[value]
A_ = tuple(UpperCAmelCase )
# Send to model
A_ = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __A ( self : Any ):
((A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[int] ):
((A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_)) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ = type
self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A ( self : Any ):
((A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A ( self : int ):
((A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def __A ( self : Union[str, Any] ):
((A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@slow
def __A ( self : Dict ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = TFLayoutLMvaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __snake_case ( ):
"""simple docstring"""
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : Optional[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase ) if is_vision_available() else None
@slow
def __A ( self : int ):
A_ = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
A_ = self.default_image_processor
A_ = prepare_img()
A_ = image_processor(images=UpperCAmelCase , return_tensors="tf" ).pixel_values
A_ = tf.constant([[1, 2]] )
A_ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
A_ = model(input_ids=UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase )
# verify the logits
A_ = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase )
A_ = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=1E-4 ) )
| 351 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 329 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a :List[Any] = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = OrderedDict()
for key, value in state_dict.items():
if key.startswith("module.encoder" ):
A_ = key.replace("module.encoder" ,"glpn.encoder" )
if key.startswith("module.decoder" ):
A_ = key.replace("module.decoder" ,"decoder.stages" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
A_ = key[key.find("patch_embed" ) + len("patch_embed" )]
A_ = key.replace(f'''patch_embed{idx}''' ,f'''patch_embeddings.{int(__UpperCamelCase )-1}''' )
if "norm" in key:
A_ = key.replace("norm" ,"layer_norm" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
A_ = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
A_ = key.replace(f'''layer_norm{idx}''' ,f'''layer_norm.{int(__UpperCamelCase )-1}''' )
if "layer_norm1" in key:
A_ = key.replace("layer_norm1" ,"layer_norm_1" )
if "layer_norm2" in key:
A_ = key.replace("layer_norm2" ,"layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
A_ = key[key.find("block" ) + len("block" )]
A_ = key.replace(f'''block{idx}''' ,f'''block.{int(__UpperCamelCase )-1}''' )
if "attn.q" in key:
A_ = key.replace("attn.q" ,"attention.self.query" )
if "attn.proj" in key:
A_ = key.replace("attn.proj" ,"attention.output.dense" )
if "attn" in key:
A_ = key.replace("attn" ,"attention.self" )
if "fc1" in key:
A_ = key.replace("fc1" ,"dense1" )
if "fc2" in key:
A_ = key.replace("fc2" ,"dense2" )
if "linear_pred" in key:
A_ = key.replace("linear_pred" ,"classifier" )
if "linear_fuse" in key:
A_ = key.replace("linear_fuse.conv" ,"linear_fuse" )
A_ = key.replace("linear_fuse.bn" ,"batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
A_ = key[key.find("linear_c" ) + len("linear_c" )]
A_ = key.replace(f'''linear_c{idx}''' ,f'''linear_c.{int(__UpperCamelCase )-1}''' )
if "bot_conv" in key:
A_ = key.replace("bot_conv" ,"0.convolution" )
if "skip_conv1" in key:
A_ = key.replace("skip_conv1" ,"1.convolution" )
if "skip_conv2" in key:
A_ = key.replace("skip_conv2" ,"2.convolution" )
if "fusion1" in key:
A_ = key.replace("fusion1" ,"1.fusion" )
if "fusion2" in key:
A_ = key.replace("fusion2" ,"2.fusion" )
if "fusion3" in key:
A_ = key.replace("fusion3" ,"3.fusion" )
if "fusion" in key and "conv" in key:
A_ = key.replace("conv" ,"convolutional_layer" )
if key.startswith("module.last_layer_depth" ):
A_ = key.replace("module.last_layer_depth" ,"head.head" )
A_ = value
return new_state_dict
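# Example of a mapping produced by `rename_keys` above (illustrative key):
# "module.encoder.patch_embed1.proj.weight" -> "glpn.encoder.patch_embeddings.0.proj.weight"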
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Dict ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
A_ = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
A_ = state_dict.pop(f'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
A_ = kv_weight[
: config.hidden_sizes[i], :
]
A_ = kv_bias[: config.hidden_sizes[i]]
A_ = kv_weight[
config.hidden_sizes[i] :, :
]
A_ = kv_bias[config.hidden_sizes[i] :]
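# Shape note for `read_in_k_v` above: each popped kv weight is a single matrix of
# shape (2 * hidden_size, hidden_size); the first hidden_size rows become the key
# projection and the remaining rows the value projection (biases split likewise).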
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return image
@torch.no_grad()
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Tuple ,__UpperCamelCase : str=False ,__UpperCamelCase : Any=None ):
"""simple docstring"""
A_ = GLPNConfig(hidden_sizes=[64, 128, 320, 512] ,decoder_hidden_size=64 ,depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
A_ = GLPNImageProcessor()
# prepare image
A_ = prepare_img()
A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values
logger.info("Converting model..." )
# load original state dict
A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) )
# rename keys
A_ = rename_keys(__UpperCamelCase )
# key and value matrices need special treatment
read_in_k_v(__UpperCamelCase ,__UpperCamelCase )
# create HuggingFace model and load state dict
A_ = GLPNForDepthEstimation(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# forward pass
A_ = model(__UpperCamelCase )
A_ = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
A_ = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
A_ = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(f'''Unknown model name: {model_name}''' )
A_ = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 )
print("Looks ok!" )
# finally, push to hub if required
if push_to_hub:
logger.info("Pushing model and image processor to the hub..." )
model.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase ,__UpperCamelCase ) ,organization="nielsr" ,commit_message="Add model" ,use_temp_dir=__UpperCamelCase ,)
image_processor.push_to_hub(
repo_path_or_name=Path(__UpperCamelCase ,__UpperCamelCase ) ,organization="nielsr" ,commit_message="Add image processor" ,use_temp_dir=__UpperCamelCase ,)
if __name__ == "__main__":
__a :Dict = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
__a :Dict = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 352 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a :Union[str, Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[int] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 329 | 0 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = (DPMSolverSDEScheduler,)
_lowerCamelCase : Tuple = 1_0
def __A ( self : str , **UpperCAmelCase : Optional[int] ):
A_ = {
"num_train_timesteps": 1100,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**UpperCAmelCase )
return config
def __A ( self : Dict ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def __A ( self : str ):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def __A ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def __A ( self : Tuple ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ = sample.to(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
A_ = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
A_ = model(UpperCAmelCase , UpperCAmelCase )
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def __A ( self : Any ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config(prediction_type="v_prediction" )
A_ = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter * scheduler.init_noise_sigma
A_ = sample.to(UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
A_ = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
A_ = model(UpperCAmelCase , UpperCAmelCase )
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3
def __A ( self : Dict ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter.to(UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
A_ = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
A_ = model(UpperCAmelCase , UpperCAmelCase )
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def __A ( self : List[Any] ):
A_ = self.scheduler_classes[0]
A_ = self.get_scheduler_config()
A_ = scheduler_class(**UpperCAmelCase , use_karras_sigmas=UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase )
A_ = self.dummy_model()
A_ = self.dummy_sample_deter.to(UpperCAmelCase ) * scheduler.init_noise_sigma
A_ = sample.to(UpperCAmelCase )
for t in scheduler.timesteps:
A_ = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
A_ = model(UpperCAmelCase , UpperCAmelCase )
A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = output.prev_sample
A_ = torch.sum(torch.abs(UpperCAmelCase ) )
A_ = torch.mean(torch.abs(UpperCAmelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
| 353 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ):
return False
return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ):
"""simple docstring"""
A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
A_ = is_compiled_module(__UpperCamelCase )
if is_compiled:
A_ = model
A_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = model.module
if not keep_fpaa_wrapper:
A_ = getattr(__UpperCamelCase ,"forward" )
A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase )
if original_forward is not None:
while hasattr(__UpperCamelCase ,"__wrapped__" ):
A_ = forward.__wrapped__
if forward == original_forward:
break
A_ = forward
if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ):
convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase )
if is_compiled:
A_ = model
A_ = compiled_model
return model
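# Illustrative effect of the unwrapping above: given
# wrapped = torch.nn.parallel.DistributedDataParallel(model), the loop strips the
# DDP (and, when available, DeepSpeed / torch.compile) wrappers and returns the
# underlying module, optionally restoring the original un-wrapped forward.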
def __snake_case ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__UpperCamelCase ,__UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(__UpperCamelCase ,__UpperCamelCase )
@contextmanager
def __snake_case ( **__UpperCamelCase : Any ):
"""simple docstring"""
for key, value in kwargs.items():
A_ = str(__UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ):
A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase )
if hasattr(__UpperCamelCase ,"__qualname__" ):
return obj.__qualname__
if hasattr(__UpperCamelCase ,"__name__" ):
return obj.__name__
return str(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
for key, value in source.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = destination.setdefault(__UpperCamelCase ,{} )
merge_dicts(__UpperCamelCase ,__UpperCamelCase )
else:
A_ = value
return destination
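# Worked example for the recursive merge above:
# merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3}) -> {"a": {"c": 2, "b": 1}, "d": 3}
# (nested dicts are merged in place; scalar values from `source` overwrite).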
def __snake_case ( __UpperCamelCase : int = None ):
"""simple docstring"""
if port is None:
A_ = 2_9500
with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
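# Usage sketch for the port probe above (illustrative): it returns True when a
# process already listens on the given local port, e.g. to check the default
# distributed port 29500 before launching a run.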
| 329 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__a :str = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Any , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Tuple ):
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead." , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 354 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ):
A_ = tempfile.mkdtemp()
A_ = BlipImageProcessor()
A_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
A_ = BlipProcessor(UpperCAmelCase , UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer
def __A ( self : Optional[Any] , **UpperCAmelCase : int ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def __A ( self : Any ):
shutil.rmtree(self.tmpdirname )
def __A ( self : Dict ):
A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self : Any ):
A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
A_ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = self.prepare_image_inputs()
A_ = image_processor(UpperCAmelCase , return_tensors="np" )
A_ = processor(images=UpperCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self : int ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = processor(text=UpperCAmelCase )
A_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Tuple ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def __A ( self : Any ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(UpperCAmelCase )
A_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 329 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__a :str = pytest.mark.integration
@pytest.mark.parametrize("path" ,["paws", "csv"] )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : str ):
"""simple docstring"""
inspect_dataset(__UpperCamelCase ,__UpperCamelCase )
A_ = path + ".py"
assert script_name in os.listdir(__UpperCamelCase )
assert "__pycache__" not in os.listdir(__UpperCamelCase )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" ,["accuracy"] )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Tuple ):
"""simple docstring"""
inspect_metric(__UpperCamelCase ,__UpperCamelCase )
A_ = path + ".py"
assert script_name in os.listdir(__UpperCamelCase )
assert "__pycache__" not in os.listdir(__UpperCamelCase )
@pytest.mark.parametrize(
"path, config_name, expected_splits" ,[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] ,)
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ):
"""simple docstring"""
A_ = get_dataset_config_info(__UpperCamelCase ,config_name=__UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" ,[
("paws", None, ValueError),
] ,)
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
with pytest.raises(__UpperCamelCase ):
get_dataset_config_info(__UpperCamelCase ,config_name=__UpperCamelCase )
@pytest.mark.parametrize(
"path, expected" ,[
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] ,)
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple ):
"""simple docstring"""
A_ = get_dataset_config_names(__UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" ,[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] ,)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = get_dataset_infos(__UpperCamelCase )
assert list(infos.keys() ) == expected_configs
A_ = expected_configs[0]
assert expected_config in infos
A_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" ,[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] ,)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = get_dataset_infos(__UpperCamelCase )
assert expected_config in infos
A_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" ,[
("paws", None, ValueError),
] ,)
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Dict ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
with pytest.raises(__UpperCamelCase ):
get_dataset_split_names(__UpperCamelCase ,config_name=__UpperCamelCase )
| 355 |
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Return the expected number of distinct colours among `num_picked` balls, to 9 decimal places."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
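# Added cross-check (a sketch, not part of the original solution): estimate the
# same expectation empirically by sampling balls without replacement. For the
# default parameters the result should approach solution(20) ~= 6.818741802.
def monte_carlo_check(trials: int = 10_000, num_picked: int = 20) -> float:
    import random

    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    seen = sum(len(set(random.sample(balls, num_picked))) for _ in range(trials))
    return seen / trials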
| 329 | 0 |
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the products of the primes in every prime partition of the input.

    By unique factorization, each product corresponds to exactly one multiset
    of prime summands, so the size of the set is the number of prime partitions.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer that can be written as a sum of primes in more than the given number of ways."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
| 356 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Optional[Any] = logging.get_logger(__name__)
__a :Any = {'vocab_file': 'vocab.txt'}
__a :Any = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__a :List[str] = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__a :List[str] = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = ConvBertTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None ):
A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 329 | 0 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """
    Advance a_i in place, either to the n-th term or to the smallest term for
    which the carry spills past 10^k, viewing each term as a(i) = b * 10^k + c.
    Returns the total increment and the number of terms that were skipped.
    """
    # ds_b is digitsum(b); c is the low k digits of a_i as an integer
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Same as next_term(a_i, k, i, n) but computes terms sequentially, without memoizing results."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add `addend` to the little-endian digit array `digits`, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return the n-th term of the sequence a(i) = a(i - 1) + digitsum(a(i - 1)), with a(1) = 1."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
| 357 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a :Optional[Any] = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 329 | 0 |
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True iff the integer is of the form 2**e * (2**e - 1), detected via an integral base-2 logarithm."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the first partition number at which the running proportion of perfect partitions drops below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
| 358 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
"""simple docstring"""
@staticmethod
def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ):
pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __A ( self : List[str] ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@require_tf
def __A ( self : int ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __A ( self : Any ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __A ( self : Optional[Any] ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 329 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
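# Added sketch of the comparison `require_version` performs for a single spec
# such as "tokenizers>=0.11.1"; the real helper also supports multiple specs
# and a custom error hint.
def _meets_requirement(installed: str, op: str, wanted: str) -> bool:
    import operator

    from packaging import version

    ops = {"<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt}
    return ops[op](version.parse(installed), version.parse(wanted))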
| 359 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
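# Added closed-form sketch of the linear warmup-then-decay schedule exercised in
# the tests below: with base_lr=10, 2 warmup steps and 10 training steps this
# reproduces [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25].
def expected_linear_warmup_lrs(base_lr: float, num_warmup: int, num_training: int) -> list:
    lrs = []
    for step in range(num_training):
        if step < num_warmup:
            lrs.append(base_lr * step / num_warmup)
        else:
            lrs.append(base_lr * (num_training - step) / (num_training - num_warmup))
    return lrs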
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def __A ( self : Dict ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , )
for _ in range(1000 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
A_ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
A_ , A_ = data
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
A_ = unwrap_schedule(UpperCAmelCase , self.num_steps )
self.assertListAlmostEqual(
UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase ) # wrap to test picklability of the schedule
A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper:
    """Wrap the lr lambdas of a LambdaLR scheduler so that the schedule stays picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 329 | 0 |
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """
    Validate the checksum letter of a Spanish DNI/NIF identifier.

    >>> is_spain_national_id("12345678Z")
    True
    >>> is_spain_national_id("12345678-Z")
    True
    >>> is_spain_national_id("12345678x")
    False
    >>> is_spain_national_id(12345678)
    Traceback (most recent call last):
        ...
    TypeError: Expected string as input, found int
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
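# Added worked example: for "12345678Z" the checksum is 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z", so the letter is valid (hyphens are stripped first).
assert is_spain_national_id("12345678-Z")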
| 360 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Dict ):
A_ = {}
A_ = []
A_ = 1
A_ = [1, 2]
A_ = {"a": 1, "b": 2}
A_ = {"a": [1, 2], "b": [3, 4]}
A_ = {"a": {"1": 1}, "b": 2}
A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
A_ = {}
A_ = []
A_ = 2
A_ = [2, 3]
A_ = {"a": 2, "b": 3}
A_ = {"a": [2, 3], "b": [4, 5]}
A_ = {"a": {"1": 2}, "b": 3}
A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
A_ = 2
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A_ = {"a": 2, "b": 0, "c": 2}
A_ = {
"a": np.eye(2 ).astype(UpperCAmelCase ),
"b": np.zeros(3 ).astype(UpperCAmelCase ),
"c": np.ones(2 ).astype(UpperCAmelCase ),
}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCAmelCase ): # can't pickle a local lambda
map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase )
def __A ( self : List[str] ):
A_ = {"a": 1, "b": 2}
A_ = {"a": 3, "b": 4}
A_ = {"a": 5, "b": 6}
A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase )
def __A ( self : Any ):
        class Foo:
            my_attr = "bar"

        foo = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
A_ = {f'''{i}''': i for i in range(__UpperCamelCase )}
A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _a ( snake_case_ ):
"""simple docstring"""
@require_tf
def __A ( self : Union[str, Any] ):
import tensorflow as tf
from tensorflow.keras import layers
A_ = layers.Dense(2 )
def gen_random_output():
A_ = tf.random.uniform((1, 3) )
return model(UpperCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __A ( self : Optional[int] ):
import torch
def gen_random_output():
A_ = torch.nn.Linear(3 , 2 )
A_ = torch.rand(1 , 3 )
return model(UpperCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __A ( self : Any ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A_ = gen_random_output()
with temp_seed(42 ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" ,[{}] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" ,[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] ,)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).flatten()
assert output == expected_output
def __snake_case ( ):
"""simple docstring"""
A_ = A(x=1 ,y="foobar" )
A_ = {"x": 1, "y": "foobar"}
assert asdict(__UpperCamelCase ) == expected_output
A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]}
A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(__UpperCamelCase ) == expected_output
with pytest.raises(__UpperCamelCase ):
asdict([1, A(x=10 ,y="foo" )] )
def _split_text(text: str):
    return text.split()


def _aseconds_generator_of_aitems_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def __snake_case ( ):
"""simple docstring"""
with Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A_ = []
for yield_time, content in iflatmap_unordered(
__UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(__UpperCamelCase )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(__UpperCamelCase ) == 4
| 329 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    """Return the module path of `test_file`."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Import and return the module object for a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Return all classes in `test_file` whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Return all test classes in `test_file` with a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return all model classes that appear in `all_model_classes` of some test class in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by a model test class, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return all model tester classes in `test_file` associated with `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Return a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Return a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Return a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Serialize nested results: classes become their names, containers are converted recursively."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
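# Added sanity check: classes serialize to their names, containers recursively.
class _Demo:
    pass


assert to_json({_Demo: [_Demo, "demo"]}) == {"_Demo": ["_Demo", "demo"]}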
| 361 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Check whether `cp` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False
def is_chinese(word: str):
    """Return 1 if every character of `word` is a CJK character, else 0."""
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """Collect multi-character tokens that consist entirely of CJK characters."""
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    """Mark continuation characters of known whole words with the "##" sub-word prefix."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
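# Added worked example: when the whole word "中国" is known, the second character
# is re-marked as a sub-word piece while the standalone character "人" is kept.
assert add_sub_symbol(["中", "国", "人"], {"中国"}) == ["中", "##国", "人"]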
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """Compute, for each line, the positions of BERT sub-word pieces that belong to a whole Chinese word."""
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__a :Dict = parser.parse_args()
main(args)
| 329 | 0 |
import datasets
__a :Any = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
__a :int = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
__a :Optional[int] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 362 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick a parquet row group size based on the (possibly nested) feature types."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ):
super().__init__(
UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , )
A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES["parquet"][1]
A_ = Parquet(
cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , )
def __A ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class ParquetDatasetWriter:
def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ):
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self : int ):
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
return written
def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ):
A_ = 0
A_ = parquet_writer_kwargs.pop("path_or_buf" , UpperCAmelCase )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
A_ = query_table(
table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCAmelCase )
written += batch.nbytes
writer.close()
return written
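# Added usage sketch (the file name is a hypothetical example): round-trip a
# small dataset through parquet with the reader and writer defined above.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   ParquetDatasetWriter(ds, "data.parquet").write()
#   reloaded = ParquetDatasetReader("data.parquet", split="train").read()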
| 329 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
__a :Any = None
__a :int = logging.get_logger(__name__)
__a :Dict = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__a :Dict = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
__a :Any = {
'facebook/nllb-large-en-ro': 1024,
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
__a :Optional[Any] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Dict = ['input_ids', 'attention_mask']
_lowerCamelCase : Any = NllbTokenizer
_lowerCamelCase : List[int] = []
_lowerCamelCase : List[int] = []
def __init__( self : Tuple , UpperCAmelCase : Any=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : List[str]="</s>" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Tuple="<unk>" , UpperCAmelCase : Any="<pad>" , UpperCAmelCase : Dict="<mask>" , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[str]=False , **UpperCAmelCase : List[Any] , ):
# Mask token behave like a normal word, i.e. include the space before it
A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
A_ = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase , tokenizer_file=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , legacy_behaviour=UpperCAmelCase , **UpperCAmelCase , )
A_ = vocab_file
A_ = False if not self.vocab_file else True
A_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
A_ = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
A_ = src_lang if src_lang is not None else "eng_Latn"
A_ = self.convert_tokens_to_ids(self._src_lang )
A_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __A ( self : int ):
return self._src_lang
@src_lang.setter
def __A ( self : Tuple , UpperCAmelCase : str ):
A_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def __A ( self : List[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def __A ( self : str , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : Optional[str] , UpperCAmelCase : Optional[str] , **UpperCAmelCase : List[Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
A_ = src_lang
A_ = self(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase )
A_ = self.convert_tokens_to_ids(UpperCAmelCase )
A_ = tgt_lang_id
return inputs
def __A ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str = "eng_Latn" , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "fra_Latn" , **UpperCAmelCase : Optional[int] , ):
A_ = src_lang
A_ = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def __A ( self : List[Any] ):
return self.set_src_lang_special_tokens(self.src_lang )
def __A ( self : Tuple ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __A ( self : str , UpperCAmelCase : str ):
A_ = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
else:
A_ = [self.cur_lang_code]
A_ = [self.eos_token_id]
A_ = self.convert_ids_to_tokens(self.prefix_tokens )
A_ = self.convert_ids_to_tokens(self.suffix_tokens )
A_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __A ( self : Optional[Any] , UpperCAmelCase : str ):
A_ = self.convert_tokens_to_ids(UpperCAmelCase )
if self.legacy_behaviour:
A_ = []
A_ = [self.eos_token_id, self.cur_lang_code]
else:
A_ = [self.cur_lang_code]
A_ = [self.eos_token_id]
A_ = self.convert_ids_to_tokens(self.prefix_tokens )
A_ = self.convert_ids_to_tokens(self.suffix_tokens )
A_ = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __A ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
A_ = os.path.join(
UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
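# Hedged usage sketch (added for illustration; not part of the original file).
# Upstream this class is NllbTokenizerFast, and the checkpoint name below is an
# assumption. With the default non-legacy behaviour, sequences are built as
# [src_lang_code] tokens </s>:
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tokenizer("Hello world", return_tensors="pt")
#   # batch.input_ids starts with the eng_Latn code and ends with </s>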
| 363 |
from __future__ import annotations
def make_matrix( row_size : int = 4 ):
    """simple docstring"""
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90( matrix : list[list[int]] ):
    """simple docstring"""
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180( matrix : list[list[int]] ):
    """simple docstring"""
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270( matrix : list[list[int]] ):
    """simple docstring"""
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose( matrix : list[list[int]] ):
    """simple docstring"""
    A_ = [list(x ) for x in zip(*matrix )]
    return A_
def reverse_row( matrix : list[list[int]] ):
    """simple docstring"""
    A_ = matrix[::-1]
    return A_
def reverse_column( matrix : list[list[int]] ):
    """simple docstring"""
    A_ = [x[::-1] for x in matrix]
    return A_
def print_matrix( matrix : list[list[int]] ):
    """simple docstring"""
    for row in matrix:
        print(*row )
if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
| 329 | 0 |
from collections import defaultdict
def check_anagrams( first_str : str ,second_str : str ):
    """simple docstring"""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" " ,"" )
    second_str = second_str.replace(" " ,"" )
    # Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
        return False
    # Default values for count should be 0
    count = defaultdict(int )
    # For each character in the input strings, increment the count for the
    # first string and decrement it for the second; anagrams cancel to zero
    for i in range(len(first_str ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values() )
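# Illustrative sanity checks (added; not in the original file). Case and
# whitespace are ignored, so these mirror the usual doctest examples:
assert check_anagrams("Silent", "Listen") is True
assert check_anagrams("This is a string", "Is this a string") is True
assert check_anagrams("There", "Their") is False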
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()
    status = check_anagrams(input_a ,input_b )
    print(F"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 364 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Any = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 329 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : str ):
debug_launcher(test_script.main )
def __A ( self : List[str] ):
debug_launcher(test_ops.main )
| 365 |
import itertools
import math
def is_prime( number : int ):
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 ,int(math.sqrt(number ) + 1 ) ,6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator( ):
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( nth : int = 1_0001 ):
    """simple docstring"""
    return next(itertools.islice(prime_generator() ,nth - 1 ,nth ) )
if __name__ == "__main__":
print(F"{solution() = }")
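    # Sanity check (added for illustration; not in the original file): the
    # 10001st prime, i.e. the answer to Project Euler problem 7, is 104743.
    assert solution() == 104743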
| 329 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Dict = logging.get_logger(__name__)
__a :int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'realm'
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
# Common config
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = retriever_proj_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_candidates
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = type_vocab_size
A_ = layer_norm_eps
# Reader config
A_ = span_hidden_size
A_ = max_span_width
A_ = reader_layer_norm_eps
A_ = reader_beam_size
A_ = reader_seq_len
# Retrieval config
A_ = num_block_records
A_ = searcher_beam_size
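# Illustrative defaults (added; not part of the original file). Upstream this
# class is RealmConfig; instantiating it with no arguments yields, per the
# positional defaults in __init__ above, e.g.:
#
#   config = RealmConfig()
#   (config.num_candidates, config.reader_beam_size, config.searcher_beam_size)
#   # -> (8, 5, 5000)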
| 366 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 384
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = 128
A_ = 2
A_ = 9
A_ = 1
A_ = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
A_ = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ = config_and_inputs
A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
| 329 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Optional[Any] = logging.get_logger(__name__)
__a :Any = {'vocab_file': 'vocab.txt'}
__a :Any = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__a :List[str] = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__a :List[str] = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = ConvBertTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
    def __A ( self : List[str] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def __A ( self : Optional[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
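# Hedged usage sketch (added; not part of the original file). Upstream this
# class is ConvBertTokenizerFast; it uses BERT-style special tokens:
#
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tok("hello world").input_ids   # [CLS] hello world [SEP]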
| 367 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Dict = logging.get_logger(__name__)
__a :int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'realm'
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
# Common config
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = retriever_proj_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_candidates
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = type_vocab_size
A_ = layer_norm_eps
# Reader config
A_ = span_hidden_size
A_ = max_span_width
A_ = reader_layer_norm_eps
A_ = reader_beam_size
A_ = reader_seq_len
# Retrieval config
A_ = num_block_records
A_ = searcher_beam_size
| 329 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
__a :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Optional[int] , **UpperCAmelCase : Tuple ):
super().__init__(**UpperCAmelCase )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self : List[str] , UpperCAmelCase : Union[np.ndarray, bytes, str] , **UpperCAmelCase : Optional[Any] ):
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Tuple , **UpperCAmelCase : List[str] ):
A_ = {}
if "candidate_labels" in kwargs:
A_ = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
A_ = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def __A ( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : int=None , UpperCAmelCase : Optional[int]="This is a sound of {}." ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
if audio.startswith("http://" ) or audio.startswith("https://" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
A_ = requests.get(UpperCAmelCase ).content
else:
with open(UpperCAmelCase , "rb" ) as f:
A_ = f.read()
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = ffmpeg_read(UpperCAmelCase , self.feature_extractor.sampling_rate )
if not isinstance(UpperCAmelCase , np.ndarray ):
raise ValueError("We expect a numpy ndarray as input" )
if len(audio.shape ) != 1:
raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" )
A_ = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" )
A_ = candidate_labels
A_ = [hypothesis_template.format(UpperCAmelCase ) for x in candidate_labels]
A_ = self.tokenizer(UpperCAmelCase , return_tensors=self.framework , padding=UpperCAmelCase )
A_ = [text_inputs]
return inputs
def __A ( self : Dict , UpperCAmelCase : Any ):
A_ = model_inputs.pop("candidate_labels" )
A_ = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , UpperCAmelCase ):
A_ = text_inputs[0]
else:
# Batching case.
A_ = text_inputs[0][0]
A_ = self.model(**UpperCAmelCase , **UpperCAmelCase )
A_ = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_audio,
}
return model_outputs
def __A ( self : str , UpperCAmelCase : List[Any] ):
A_ = model_outputs.pop("candidate_labels" )
A_ = model_outputs["logits"][0]
if self.framework == "pt":
A_ = logits.softmax(dim=0 )
A_ = probs.tolist()
else:
raise ValueError("`tf` framework not supported." )
A_ = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(UpperCAmelCase , UpperCAmelCase ) , key=lambda x : -x[0] )
]
return result
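# Hedged usage sketch (added; not part of the original file). Upstream this
# pipeline is reached via the `pipeline` factory; the CLAP checkpoint below is
# an illustrative assumption, not mandated by this file:
#
#   from transformers import pipeline
#   classifier = pipeline(
#       "zero-shot-audio-classification", model="laion/clap-htsat-unfused"
#   )
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])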
| 368 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a :Optional[Any] = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = original_name.split("." )[0]
A_ = key.split("." )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] )
A_ = orig_block_num - offset
A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = OrderedDict()
A_ , A_ = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
A_ = key.replace("network" ,"poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
A_ = key[: key.find("proj" )]
A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' )
A_ = key.replace("proj" ,"projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
A_ = "poolformer.encoder." + key
if "mlp.fc1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" )
if "mlp.fc2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" )
if "norm1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" )
if "norm2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" )
if "layer_scale_1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" )
if "layer_scale_2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" )
if "head" in key:
A_ = key.replace("head" ,"classifier" )
A_ = value
return new_state_dict
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return image
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = PoolFormerConfig()
# set attributes based on model_name
A_ = "huggingface/label-files"
A_ = model_name[-3:]
A_ = 1000
A_ = "imagenet-1k-id2label.json"
A_ = (1, 1000)
# set config attributes
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
    A_ = {int(k ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
if size == "s12":
A_ = [2, 2, 6, 2]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s24":
A_ = [4, 4, 12, 4]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s36":
A_ = [6, 6, 18, 6]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 1E-6
A_ = 0.9
elif size == "m36":
A_ = [6, 6, 18, 6]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
elif size == "m48":
A_ = [8, 8, 24, 8]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
# Prepare image
A_ = prepare_img()
A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) )
# rename keys
A_ = rename_keys(__UpperCamelCase )
# create HuggingFace model and load state dict
A_ = PoolFormerForImageClassification(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Define image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values
# forward pass
A_ = model(__UpperCamelCase )
A_ = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
A_ = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
A_ = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
A_ = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
A_ = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__a :int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
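# Example invocation (added for illustration; the script filename and the
# filesystem paths below are placeholders, not taken from this file):
#
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path /path/to/output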
| 329 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__a :List[Any] = random.Random()
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple=1.0 ,__UpperCamelCase : str=None ,__UpperCamelCase : Union[str, Any]=None ):
"""simple docstring"""
if rng is None:
A_ = global_rng
A_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : str=7 , UpperCAmelCase : List[str]=400 , UpperCAmelCase : Tuple=2000 , UpperCAmelCase : Optional[Any]=24 , UpperCAmelCase : Union[str, Any]=24 , UpperCAmelCase : str=0.0 , UpperCAmelCase : str=16000 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[Any]=True , ):
A_ = parent
A_ = batch_size
A_ = min_seq_length
A_ = max_seq_length
A_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A_ = feature_size
A_ = num_mel_bins
A_ = padding_value
A_ = sampling_rate
A_ = return_attention_mask
A_ = do_normalize
def __A ( self : List[str] ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self : str , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Optional[Any]=False ):
def _flatten(UpperCAmelCase : int ):
return list(itertools.chain(*UpperCAmelCase ) )
if equal_length:
A_ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
A_ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A_ = [np.asarray(UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def __A ( self : Dict ):
A_ = SpeechaTextFeatureExtractionTester(self )
def __A ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ):
self.assertTrue(np.all(np.mean(UpperCAmelCase , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase , axis=0 ) - 1 ) < 1E-3 ) )
def __A ( self : Optional[Any] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
A_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A_ = [np.asarray(UpperCAmelCase ) for speech_input in speech_inputs]
# Test feature size
A_ = feature_extractor(UpperCAmelCase , padding=UpperCAmelCase , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
A_ = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
A_ = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 ) )
# Test batched
A_ = feature_extractor(UpperCAmelCase , return_tensors="np" ).input_features
A_ = feature_extractor(UpperCAmelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
A_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A_ = np.asarray(UpperCAmelCase )
A_ = feature_extractor(UpperCAmelCase , return_tensors="np" ).input_features
A_ = feature_extractor(UpperCAmelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 ) )
def __A ( self : Dict ):
A_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A_ = ["longest", "max_length", "do_not_pad"]
A_ = [None, 16, None]
for max_length, padding in zip(UpperCAmelCase , UpperCAmelCase ):
A_ = feature_extractor(
UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , return_attention_mask=UpperCAmelCase )
A_ = inputs.input_features
A_ = inputs.attention_mask
A_ = [np.sum(UpperCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __A ( self : Any ):
A_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A_ = ["longest", "max_length", "do_not_pad"]
A_ = [None, 16, None]
for max_length, padding in zip(UpperCAmelCase , UpperCAmelCase ):
A_ = feature_extractor(
UpperCAmelCase , max_length=UpperCAmelCase , padding=UpperCAmelCase , return_tensors="np" , return_attention_mask=UpperCAmelCase )
A_ = inputs.input_features
A_ = inputs.attention_mask
A_ = [np.sum(UpperCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __A ( self : Any ):
A_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A_ = feature_extractor(
UpperCAmelCase , padding="max_length" , max_length=4 , truncation=UpperCAmelCase , return_tensors="np" , return_attention_mask=UpperCAmelCase , )
A_ = inputs.input_features
A_ = inputs.attention_mask
A_ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __A ( self : Union[str, Any] ):
A_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A_ = feature_extractor(
UpperCAmelCase , padding="longest" , max_length=4 , truncation=UpperCAmelCase , return_tensors="np" , return_attention_mask=UpperCAmelCase , )
A_ = inputs.input_features
A_ = inputs.attention_mask
A_ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
A_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A_ = feature_extractor(
UpperCAmelCase , padding="longest" , max_length=16 , truncation=UpperCAmelCase , return_tensors="np" , return_attention_mask=UpperCAmelCase , )
A_ = inputs.input_features
A_ = inputs.attention_mask
A_ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def __A ( self : Optional[int] ):
import torch
A_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ = np.random.rand(100 , 32 ).astype(np.floataa )
A_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A_ = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
A_ = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __A ( self : Optional[Any] , UpperCAmelCase : int ):
from datasets import load_dataset
A_ = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
A_ = ds.sort("id" ).select(range(UpperCAmelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __A ( self : str ):
# fmt: off
A_ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
A_ = self._load_datasamples(1 )
A_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A_ = feature_extractor(UpperCAmelCase , return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , UpperCAmelCase , atol=1E-4 ) )
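# Hedged usage sketch (added; not part of the original file). The extractor
# turns raw 16 kHz waveforms into log-mel filter-bank frames; with the test
# configuration (feature_size=24, num_mel_bins=24) one second of audio should
# yield roughly 100 frames, though the exact count depends on the windowing:
#
#   extractor = SpeechaTextFeatureExtractor(feature_size=24, num_mel_bins=24)
#   feats = extractor([np.zeros(16000)], sampling_rate=16000, return_tensors="np")
#   feats.input_features.shape   # (1, num_frames, 24)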
| 369 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : torch.FloatTensor
_lowerCamelCase : Optional[torch.FloatTensor] = None
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase : int ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
A_ = []
for i in range(__UpperCamelCase ):
A_ = i / num_diffusion_timesteps
A_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) )
return torch.tensor(__UpperCamelCase ,dtype=torch.floataa )
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
A_ = betas_for_alpha_bar(UpperCAmelCase )
A_ = 1.0 - self.betas
A_ = torch.cumprod(self.alphas , dim=0 )
A_ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
A_ = 1.0
# setable values
A_ = None
A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() )
A_ = variance_type
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ):
return sample
def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ):
A_ = num_inference_steps
A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase )
def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ):
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
A_ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
A_ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) )
A_ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
A_ = variance.log()
A_ = beta.log()
A_ = (predicted_variance + 1) / 2
A_ = frac * max_log + (1 - frac) * min_log
return variance
def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ):
A_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 )
else:
A_ = None
# 1. compute alphas, betas
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
A_ = self.alphas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
A_ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
A_ = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
A_ = torch.clamp(
UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A_ = 0
if t > 0:
A_ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device )
A_ = self._get_variance(
UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
A_ = variance
elif self.variance_type == "learned_range":
A_ = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
" for the UnCLIPScheduler." )
A_ = variance * variance_noise
A_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
A_ = timesteps.to(original_samples.device )
A_ = alphas_cumprod[timesteps] ** 0.5
A_ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_alpha_prod.unsqueeze(-1 )
A_ = (1 - alphas_cumprod[timesteps]) ** 0.5
A_ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
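# The add-noise method above implements the closed form x_t = sqrt(alpha_bar_t) * x_0
# + sqrt(1 - alpha_bar_t) * eps, flattening and unsqueezing the per-timestep scalars so they
# broadcast over the sample dimensions — a minimal sketch with illustrative names and shapes:
#
#     alpha_bar = scheduler.alphas_cumprod[timesteps]          # (batch,)
#     coeff = alpha_bar[:, None, None, None]                   # broadcast to (batch, 1, 1, 1)
#     noisy = coeff ** 0.5 * original_samples + (1 - coeff) ** 0.5 * noise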
| 329 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
__a :Tuple = logging.get_logger(__name__)
__a :Optional[Any] = 'Hello world! cécé herlolip'
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : bool ):
"""simple docstring"""
A_ = FairseqRobertaModel.from_pretrained(__UpperCamelCase )
roberta.eval() # disable dropout
A_ = roberta.model.encoder.sentence_encoder
A_ = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
A_ = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our RoBERTa config:" ,__UpperCamelCase )
A_ = XLMRobertaXLForSequenceClassification(__UpperCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(__UpperCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
A_ = roberta_sent_encoder.embed_tokens.weight
A_ = roberta_sent_encoder.embed_positions.weight
A_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
A_ = roberta_sent_encoder.layer_norm.weight
A_ = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A_ = model.roberta.encoder.layer[i]
A_ = roberta_sent_encoder.layers[i]
A_ = layer.attention
A_ = roberta_layer.self_attn_layer_norm.weight
A_ = roberta_layer.self_attn_layer_norm.bias
# self attention
A_ = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
A_ = roberta_layer.self_attn.q_proj.weight
A_ = roberta_layer.self_attn.q_proj.bias
A_ = roberta_layer.self_attn.k_proj.weight
A_ = roberta_layer.self_attn.k_proj.bias
A_ = roberta_layer.self_attn.v_proj.weight
A_ = roberta_layer.self_attn.v_proj.bias
# self-attention output
A_ = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
A_ = roberta_layer.self_attn.out_proj.weight
A_ = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
A_ = roberta_layer.final_layer_norm.weight
A_ = roberta_layer.final_layer_norm.bias
# intermediate
A_ = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        A_ = roberta_layer.fc1.weight
        A_ = roberta_layer.fc1.bias
        # output
        A_ = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        A_ = roberta_layer.fc2.weight
        A_ = roberta_layer.fc2.bias
# end of layer
if classification_head:
A_ = roberta.model.classification_heads["mnli"].dense.weight
A_ = roberta.model.classification_heads["mnli"].dense.bias
A_ = roberta.model.classification_heads["mnli"].out_proj.weight
A_ = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
A_ = roberta.model.encoder.lm_head.dense.weight
A_ = roberta.model.encoder.lm_head.dense.bias
A_ = roberta.model.encoder.lm_head.layer_norm.weight
A_ = roberta.model.encoder.lm_head.layer_norm.bias
A_ = roberta.model.encoder.lm_head.weight
A_ = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
A_ = roberta.encode(__UpperCamelCase ).unsqueeze(0 ) # batch of size 1
A_ = model(__UpperCamelCase )[0]
if classification_head:
A_ = roberta.model.classification_heads["mnli"](roberta.extract_features(__UpperCamelCase ) )
else:
A_ = roberta.model(__UpperCamelCase )[0]
print(our_output.shape ,their_output.shape )
A_ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
A_ = torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1E-3 )
print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
pathlib.Path(__UpperCamelCase ).mkdir(parents=__UpperCamelCase ,exist_ok=__UpperCamelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
__a :Optional[int] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
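# An illustrative invocation of this conversion script (the file name and both paths are
# placeholders, not taken from the original repository):
#
#     python convert_xlm_roberta_xl_checkpoint.py \
#         --roberta_checkpoint_path /path/to/fairseq/xlm-roberta-xl \
#         --pytorch_dump_folder_path ./xlm-roberta-xl-converted \
#         --classification_head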
| 370 |
from math import isqrt, log2
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = [True] * max_number
for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2 ,__UpperCamelCase ,i ):
A_ = False
return [i for i in range(2 ,__UpperCamelCase ) if is_prime[i]]
def __snake_case ( __UpperCamelCase : int = 80_0800 ,__UpperCamelCase : int = 80_0800 ):
"""simple docstring"""
    A_ = degree * log2(__UpperCamelCase )
A_ = int(__UpperCamelCase )
A_ = calculate_prime_numbers(__UpperCamelCase )
A_ = 0
A_ = 0
A_ = len(__UpperCamelCase ) - 1
while left < right:
while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
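# A small worked check of the log-space comparison above: for p = 2, q = 3 the hybrid
# integer is p**q * q**p = 8 * 9 = 72, and q * log2(p) + p * log2(q) = 3 + 2 * log2(3)
# ≈ 6.17 = log2(72). Comparing exponent sums against degree * log2(base) therefore counts
# the same pairs without ever materialising the astronomically large products.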
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__a :List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class _a ( datasets.BuilderConfig ):
"""simple docstring"""
_lowerCamelCase : Optional[datasets.Features] = None
_lowerCamelCase : str = "utf-8"
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : bool = True # deprecated
_lowerCamelCase : Optional[int] = None # deprecated
_lowerCamelCase : int = 1_0 << 2_0 # 10MB
_lowerCamelCase : Optional[bool] = None
class _a ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = JsonConfig
def __A ( self : Optional[Any] ):
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
A_ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
def __A ( self : Dict , UpperCAmelCase : Tuple ):
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
A_ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase , (str, list, tuple) ):
A_ = data_files
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = [files]
A_ = [dl_manager.iter_files(UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
A_ = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = [files]
A_ = [dl_manager.iter_files(UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase , gen_kwargs={"files": files} ) )
return splits
def __A ( self : Optional[Any] , UpperCAmelCase : pa.Table ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A_ = self.config.features.arrow_schema.field(UpperCAmelCase ).type
A_ = pa_table.append_column(UpperCAmelCase , pa.array([None] * len(UpperCAmelCase ) , type=UpperCAmelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ = table_cast(UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] ):
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ = json.load(UpperCAmelCase )
# We keep only the field we are interested in
A_ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase , (list, tuple) ):
A_ = set().union(*[row.keys() for row in dataset] )
A_ = {col: [row.get(UpperCAmelCase ) for row in dataset] for col in keys}
else:
A_ = dataset
A_ = pa.Table.from_pydict(UpperCAmelCase )
yield file_idx, self._cast_table(UpperCAmelCase )
# If the file has one json object per line
else:
with open(UpperCAmelCase , "rb" ) as f:
A_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A_ = max(self.config.chunksize // 32 , 16 << 10 )
A_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
A_ = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A_ = batch.decode(self.config.encoding , errors=UpperCAmelCase ).encode("utf-8" )
try:
while True:
try:
A_ = paj.read_json(
io.BytesIO(UpperCAmelCase ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase , pa.ArrowInvalid )
and "straddling" not in str(UpperCAmelCase )
or block_size > len(UpperCAmelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'''Batch of {len(UpperCAmelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ = json.load(UpperCAmelCase )
except json.JSONDecodeError:
logger.error(f'''Failed to read file \'{file}\' with error {type(UpperCAmelCase )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase , UpperCAmelCase ): # list is the only sequence type supported in JSON
try:
A_ = set().union(*[row.keys() for row in dataset] )
A_ = {col: [row.get(UpperCAmelCase ) for row in dataset] for col in keys}
A_ = pa.Table.from_pydict(UpperCAmelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(UpperCAmelCase )}: {e}''' )
raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None
yield file_idx, self._cast_table(UpperCAmelCase )
break
else:
logger.error(f'''Failed to read file \'{file}\' with error {type(UpperCAmelCase )}: {e}''' )
raise ValueError(
f'''Not able to read records in the JSON file at {file}. '''
f'''You should probably indicate the field of the JSON file containing your records. '''
                                    f'''This JSON file contains the following fields: {str(list(dataset.keys() ) )}. '''
f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase )
batch_idx += 1
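# Minimal sketch of the list-of-dicts normalisation used twice above (the data is illustrative):
#
#     rows = [{"a": 1, "b": 2}, {"a": 3}]
#     keys = set().union(*[row.keys() for row in rows])               # {"a", "b"}
#     cols = {col: [row.get(col) for row in rows] for col in keys}    # {"a": [1, 3], "b": [2, None]}
#     pa.Table.from_pydict(cols)                                      # missing keys surface as nulls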
| 371 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__a :str = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = RobertaPreLayerNormConfig.from_pretrained(
__UpperCamelCase ,architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A_ = torch.load(hf_hub_download(repo_id=__UpperCamelCase ,filename="pytorch_model.bin" ) )
A_ = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("roberta." ):
A_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A_ = tensor_value
A_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__UpperCamelCase ,config=__UpperCamelCase ,state_dict=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
# convert tokenizer
A_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
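# An illustrative invocation (the checkpoint repo comes from the help string above; the
# script file name and output path are placeholders):
#
#     python convert_roberta_prelayernorm_checkpoint.py \
#         --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#         --pytorch_dump_folder_path ./roberta-prelayernorm-converted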
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a :int = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[str] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__a :List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
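# With the _LazyModule indirection above, the torch-backed classes are only imported on
# first attribute access — a sketch of typical downstream usage, assuming this file is the
# subpackage __init__:
#
#     from transformers import InstructBlipProcessor                   # cheap, no torch yet
#     from transformers import InstructBlipForConditionalGeneration    # triggers the real import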
| 350 |
from maths.prime_factors import prime_factors
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = f'''Input value of [number={number}] must be an integer'''
raise TypeError(__UpperCamelCase )
if number < 1:
raise ValueError("Input must be a positive integer" )
return -1 if len(prime_factors(__UpperCamelCase ) ) % 2 else 1
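# The expression above computes (-1) ** Omega(n) — the Liouville lambda function — assuming
# prime_factors returns factors with multiplicity: e.g. 10 = 2 * 5 gives +1, while
# 12 = 2 * 2 * 3 has three factors and gives -1.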
if __name__ == "__main__":
import doctest
doctest.testmod()
| 329 | 0 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Dict=False ):
"""simple docstring"""
try:
A_ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A_ = default
else:
# KEY is set, convert it to True or False.
try:
A_ = strtobool(__UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
__a :Any = parse_flag_from_env('RUN_SLOW', default=False)
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
return unittest.skip("Test was skipped" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests ,"test is slow" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() ,"test requires only a CPU" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() ,"test requires a GPU" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() ,"test requires a XPU" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
return unittest.skipUnless(is_mps_available() ,"test requires a `mps` backend support in `torch`" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() ,"test requires the Hugging Face suite" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Tuple ):
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() ,"test requires the bitsandbytes library" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() ,"test requires TPU" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 ,"test requires a GPU" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 ,"test requires a XPU" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 ,"test requires multiple GPUs" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 ,"test requires multiple XPUs" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() ,"test requires safetensors" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() ,"test requires DeepSpeed" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
return unittest.skipUnless(is_torch_version(">=" ,"1.12.0" ) ,"test requires torch version >= 1.12.0" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : List[Any]=None ,__UpperCamelCase : Optional[Any]=None ):
"""simple docstring"""
if test_case is None:
return partial(__UpperCamelCase ,version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version(">=" ,__UpperCamelCase ) ,f'''test requires torch version >= {version}''' )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() ,"test requires Tensorboard" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() ,"test requires wandb" )(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() ,"test requires comet_ml" )(__UpperCamelCase )
__a :List[str] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available ,"test requires at least one tracker to be available and for `comet_ml` to not be installed" ,)(__UpperCamelCase )
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = True
@classmethod
def __A ( cls : Any ):
A_ = tempfile.mkdtemp()
@classmethod
def __A ( cls : List[Any] ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __A ( self : Dict ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(UpperCAmelCase )
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Optional[int] ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Union[str, Any] , UpperCAmelCase : Union[mock.Mock, List[mock.Mock]] ):
A_ = mocks if isinstance(UpperCAmelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = AcceleratorState()
A_ = tensor[None].clone().to(state.device )
A_ = gather(__UpperCamelCase ).cpu()
A_ = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] ,__UpperCamelCase ):
return False
return True
class _a :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] ):
A_ = returncode
A_ = stdout
A_ = stderr
async def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ):
"""simple docstring"""
while True:
A_ = await stream.readline()
if line:
callback(__UpperCamelCase )
else:
break
async def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : int=None ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : Tuple=None ,__UpperCamelCase : Tuple=False ,__UpperCamelCase : Any=False ):
"""simple docstring"""
if echo:
print("\nRunning: " ," ".join(__UpperCamelCase ) )
A_ = await asyncio.create_subprocess_exec(
cmd[0] ,*cmd[1:] ,stdin=__UpperCamelCase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=__UpperCamelCase ,)
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A_ = []
A_ = []
def tee(__UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[Any]="" ):
A_ = line.decode("utf-8" ).rstrip()
sink.append(__UpperCamelCase )
if not quiet:
print(__UpperCamelCase ,__UpperCamelCase ,file=__UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout ,lambda __UpperCamelCase : tee(__UpperCamelCase ,__UpperCamelCase ,sys.stdout ,label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr ,lambda __UpperCamelCase : tee(__UpperCamelCase ,__UpperCamelCase ,sys.stderr ,label="stderr:" ) ) ),
] ,timeout=__UpperCamelCase ,)
return _RunOutput(await p.wait() ,__UpperCamelCase ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : str=None ,__UpperCamelCase : Optional[Any]=180 ,__UpperCamelCase : str=False ,__UpperCamelCase : str=True ):
"""simple docstring"""
A_ = asyncio.get_event_loop()
A_ = loop.run_until_complete(
_stream_subprocess(__UpperCamelCase ,env=__UpperCamelCase ,stdin=__UpperCamelCase ,timeout=__UpperCamelCase ,quiet=__UpperCamelCase ,echo=__UpperCamelCase ) )
A_ = " ".join(__UpperCamelCase )
if result.returncode > 0:
A_ = "\n".join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
return result
class _a ( snake_case_ ):
"""simple docstring"""
pass
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Optional[int]=False ):
"""simple docstring"""
try:
A_ = subprocess.check_output(__UpperCamelCase ,stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__UpperCamelCase ,"decode" ):
A_ = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{" ".join(__UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
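# A minimal usage sketch of the two entry points above (the original helper names are
# anonymised in this file; execute_subprocess_async and run_command are assumed names, and
# the command is illustrative):
#
#     result = execute_subprocess_async(["python", "--version"], env=os.environ.copy())
#     assert result.returncode == 0
#     out = run_command(["python", "--version"], return_stdout=True)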
| 351 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
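# As a concrete illustration, a path like 'Data Structures/Binary-Tree.py' would trip three
# of the checks above — the uppercase, space and hyphen filters — while only lowercase,
# underscore-separated paths inside a directory pass cleanly.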
| 329 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__a :Any = logging.get_logger(__name__)
__a :Dict = 'https://openaipublic.azureedge.net/jukebox/models/'
__a :List[str] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
A_ = key.replace(".model.1.bias" ,".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
A_ = key.replace(".model.1.weight" ,".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
A_ = key.replace(".model.3.bias" ,".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
A_ = key.replace(".model.3.weight" ,".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
A_ = key.replace("conditioner_blocks.0" ,"conditioner_blocks" )
if "prime_prior" in key:
A_ = key.replace("prime_prior" ,"encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
A_ = key.replace(".emb." ,"." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" ,".codebook" )
if "y_emb." in key:
return key.replace("y_emb." ,"metadata_embedding." )
if "x_emb.emb." in key:
A_ = key.replace("0.x_emb.emb" ,"embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" ,"encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" ,".layer_norm" )
if "_ln" in key:
return key.replace("_ln" ,"_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" ,"encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" ,"encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" ,"fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" ,"embed_tokens" )
return key
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Dict ):
"""simple docstring"""
A_ = {}
import re
A_ = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
A_ = re.compile(
R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
A_ = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
A_ = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
A_ = re.compile(
R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
A_ = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
A_ = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
A_ = re.compile(
R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
A_ = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__UpperCamelCase ):
A_ = re_encoder_block_conv_in.match(__UpperCamelCase )
A_ = regex_match.groups()
A_ = int(groups[2] ) * 2 + int(groups[3] )
A_ = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
A_ = re_encoder_block_conv_in.sub(__UpperCamelCase ,__UpperCamelCase )
elif re_encoder_block_resnet.fullmatch(__UpperCamelCase ):
A_ = re_encoder_block_resnet.match(__UpperCamelCase )
A_ = regex_match.groups()
A_ = int(groups[2] ) * 2 + int(groups[3] )
A_ = {"1": 1, "3": 2}[groups[-2]]
A_ = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
A_ = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
A_ = prefix + resnet_block
A_ = re_encoder_block_resnet.sub(__UpperCamelCase ,__UpperCamelCase )
elif re_encoder_block_proj_out.fullmatch(__UpperCamelCase ):
A_ = re_encoder_block_proj_out.match(__UpperCamelCase )
A_ = regex_match.groups()
A_ = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
A_ = re_encoder_block_proj_out.sub(__UpperCamelCase ,__UpperCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__UpperCamelCase ):
A_ = re_decoder_block_conv_out.match(__UpperCamelCase )
A_ = regex_match.groups()
A_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
A_ = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
A_ = re_decoder_block_conv_out.sub(__UpperCamelCase ,__UpperCamelCase )
elif re_decoder_block_resnet.fullmatch(__UpperCamelCase ):
A_ = re_decoder_block_resnet.match(__UpperCamelCase )
A_ = regex_match.groups()
A_ = int(groups[2] ) * 2 + int(groups[3] ) - 2
A_ = {"1": 1, "3": 2}[groups[-2]]
A_ = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
A_ = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
A_ = prefix + resnet_block
A_ = re_decoder_block_resnet.sub(__UpperCamelCase ,__UpperCamelCase )
elif re_decoder_block_proj_in.fullmatch(__UpperCamelCase ):
A_ = re_decoder_block_proj_in.match(__UpperCamelCase )
A_ = regex_match.groups()
A_ = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
A_ = re_decoder_block_proj_in.sub(__UpperCamelCase ,__UpperCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__UpperCamelCase ):
A_ = re_prior_cond_conv_out.match(__UpperCamelCase )
A_ = regex_match.groups()
A_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
A_ = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
A_ = re_prior_cond_conv_out.sub(__UpperCamelCase ,__UpperCamelCase )
elif re_prior_cond_resnet.fullmatch(__UpperCamelCase ):
A_ = re_prior_cond_resnet.match(__UpperCamelCase )
A_ = regex_match.groups()
A_ = int(groups[1] ) * 2 + int(groups[2] ) - 2
A_ = {"1": 1, "3": 2}[groups[-2]]
A_ = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
A_ = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
A_ = prefix + resnet_block
A_ = re_prior_cond_resnet.sub(__UpperCamelCase ,__UpperCamelCase )
elif re_prior_cond_proj_in.fullmatch(__UpperCamelCase ):
A_ = re_prior_cond_proj_in.match(__UpperCamelCase )
A_ = regex_match.groups()
A_ = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
A_ = re_prior_cond_proj_in.sub(__UpperCamelCase ,__UpperCamelCase )
# keep original key
else:
A_ = original_key
A_ = replace_key(__UpperCamelCase )
if f'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(f'''failed converting {original_key} to {key}, does not match''' )
        # handle mismatched shape
elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape:
A_ = model_state_dict[f'''{key_prefix}.{key}''']
            print(f'''{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match''' )
A_ = original_key
A_ = original_key
A_ = value
return new_dict
@torch.no_grad()
def __snake_case ( __UpperCamelCase : str=None ,__UpperCamelCase : int=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ):
A_ = requests.get(f'''{PREFIX}{file}''' ,allow_redirects=__UpperCamelCase )
os.makedirs(f'''{pytorch_dump_folder_path}/''' ,exist_ok=__UpperCamelCase )
open(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ,"wb" ).write(r.content )
A_ = MODEL_MAPPING[model_name.split("/" )[-1]]
A_ = JukeboxConfig.from_pretrained(__UpperCamelCase )
A_ = JukeboxModel(__UpperCamelCase )
A_ = []
A_ = {}
for i, dict_name in enumerate(__UpperCamelCase ):
A_ = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}''' )["model"]
A_ = {}
for k in old_dic.keys():
if k.endswith(".b" ):
A_ = old_dic[k]
elif k.endswith(".w" ):
A_ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
A_ = old_dic[k]
else:
A_ = old_dic[k]
A_ = "vqvae" if i == 0 else f'''priors.{3 - i}'''
A_ = fix_jukebox_keys(__UpperCamelCase ,model.state_dict() ,__UpperCamelCase ,__UpperCamelCase )
weight_dict.append(__UpperCamelCase )
A_ = weight_dict.pop(0 )
model.vqvae.load_state_dict(__UpperCamelCase )
for i in range(len(__UpperCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
with open(f'''{pytorch_dump_folder_path}/mapping.json''' ,"w" ) as txtfile:
json.dump(__UpperCamelCase ,__UpperCamelCase )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
return weight_dict
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
__a :Tuple = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
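# With the argparse defaults above, running the script with no flags converts
# jukebox-5b-lyrics into ./jukebox-5b-lyrics-converted; the other supported checkpoint can
# be selected explicitly (the script file name is a placeholder):
#
#     python convert_jukebox.py --model_name jukebox-1b-lyrics --pytorch_dump_folder_path ./jukebox-1b-lyrics-converted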
| 352 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a :Union[str, Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[int] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 329 | 0 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__a :Optional[Any] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Any = DebertaVaTokenizer
_lowerCamelCase : int = DebertaVaTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Any = True
def __A ( self : str ):
super().setUp()
# We have a SentencePiece fixture for testing
A_ = DebertaVaTokenizer(UpperCAmelCase , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self : Union[str, Any] , UpperCAmelCase : str ):
A_ = "this is a test"
A_ = "this is a test"
return input_text, output_text
def __A ( self : List[Any] ):
A_ = "<pad>"
A_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def __A ( self : int ):
A_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(UpperCAmelCase ) , 30001 )
def __A ( self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def __A ( self : List[str] ):
# fmt: off
A_ = " \tHeLLo!how \n Are yoU? "
A_ = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
A_ = DebertaVaTokenizer(UpperCAmelCase , do_lower_case=UpperCAmelCase )
A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = DebertaVaTokenizerFast(UpperCAmelCase , do_lower_case=UpperCAmelCase )
A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __A ( self : Optional[int] ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __A ( self : Optional[Any] ):
pass
def __A ( self : Optional[Any] ):
# fmt: off
A_ = "I was born in 92000, and this is falsé."
A_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
A_ = DebertaVaTokenizer(UpperCAmelCase , split_by_punct=UpperCAmelCase )
A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = DebertaVaTokenizerFast(UpperCAmelCase , split_by_punct=UpperCAmelCase )
A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Any ):
# fmt: off
A_ = "I was born in 92000, and this is falsé."
A_ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
A_ = DebertaVaTokenizer(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase )
A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = DebertaVaTokenizerFast(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase )
A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] ):
# fmt: off
A_ = "I was born in 92000, and this is falsé."
A_ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
A_ = DebertaVaTokenizer(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase )
A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = DebertaVaTokenizerFast(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase )
A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] ):
# fmt: off
A_ = "I was born in 92000, and this is falsé."
A_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
A_ = DebertaVaTokenizer(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase )
A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = DebertaVaTokenizerFast(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase )
A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : List[str] ):
# fmt: off
A_ = " \tHeLLo!how \n Are yoU? "
A_ = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
A_ = DebertaVaTokenizer(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase )
A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = DebertaVaTokenizerFast(UpperCAmelCase , do_lower_case=UpperCAmelCase , split_by_punct=UpperCAmelCase )
A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.get_tokenizer()
A_ = self.get_rust_tokenizer()
A_ = "I was born in 92000, and this is falsé."
A_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
A_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
A_ = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = self.get_rust_tokenizer()
A_ = tokenizer.encode(UpperCAmelCase )
A_ = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Union[str, Any] ):
A_ = "This is a test"
A_ = [13, 1, 4398, 25, 21, 1289]
A_ = ["▁", "T", "his", "▁is", "▁a", "▁test"]
A_ = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
A_ = DebertaVaTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
A_ = DebertaVaTokenizerFast(UpperCAmelCase , keep_accents=UpperCAmelCase )
A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = rust_tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# fmt: off
A_ = "I was born in 92000, and this is falsé."
A_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
A_ = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
A_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
A_ = rust_tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = DebertaVaTokenizer(UpperCAmelCase )
A_ = tokenizer.encode("sequence builders" )
A_ = tokenizer.encode("multi-sequence build" )
A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCAmelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCAmelCase , )
@slow
def __A ( self : List[Any] ):
# fmt: off
A_ = {"input_ids": [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
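# The build_inputs_with_special_tokens assertions above pin down DeBERTa-v2's layout: a
# single sequence encodes as [CLS] A [SEP] and a pair as [CLS] A [SEP] B [SEP], i.e.
#
#     [cls_id, *text, sep_id]
#     [cls_id, *text, sep_id, *text_2, sep_id]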
| 353 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
    if is_torch_version("<" ,"2.0.0" ) or not hasattr(torch ,"_dynamo" ):
return False
return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ):
"""simple docstring"""
A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
A_ = is_compiled_module(__UpperCamelCase )
if is_compiled:
A_ = model
A_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = model.module
    if not keep_fp32_wrapper:
A_ = getattr(__UpperCamelCase ,"forward" )
A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase )
if original_forward is not None:
while hasattr(__UpperCamelCase ,"__wrapped__" ):
A_ = forward.__wrapped__
if forward == original_forward:
break
A_ = forward
if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ):
convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase )
if is_compiled:
A_ = model
A_ = compiled_model
return model
def __snake_case ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__UpperCamelCase ,__UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(__UpperCamelCase ,__UpperCamelCase )
@contextmanager
def __snake_case ( **__UpperCamelCase : Any ):
"""simple docstring"""
for key, value in kwargs.items():
A_ = str(__UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ):
A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase )
if hasattr(__UpperCamelCase ,"__qualname__" ):
return obj.__qualname__
if hasattr(__UpperCamelCase ,"__name__" ):
return obj.__name__
return str(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
for key, value in source.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = destination.setdefault(__UpperCamelCase ,{} )
merge_dicts(__UpperCamelCase ,__UpperCamelCase )
else:
A_ = value
return destination
def __snake_case ( __UpperCamelCase : int = None ):
"""simple docstring"""
if port is None:
A_ = 2_9500
with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
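# A usage sketch of the environment-patching context manager above (patch_environment is an
# assumed name — the def is anonymised here, and the entry-side upper-casing is inferred
# from the cleanup loop, which deletes key.upper()):
#
#     with patch_environment(master_port="29501"):
#         ...  # os.environ["MASTER_PORT"] == "29501" inside the block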
| 329 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self : List[str] , parent : Union[str, Any] , batch_size : List[str]=7 , num_channels : Optional[Any]=3 , image_size : Union[str, Any]=18 , min_resolution : List[str]=30 , max_resolution : Tuple=400 , do_resize : Any=True , size : str=None , do_normalize : Optional[Any]=True , ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
def __A ( self : int ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
[-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ImageGPTImageProcessor if is_vision_available() else None
def __A ( self : Union[str, Any] ):
A_ = ImageGPTImageProcessingTester(self )
@property
def __A ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Optional[int] ):
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , "clusters" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) )
def __A ( self : Dict ):
A_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __A ( self : List[Any] ):
A_ = self.image_processing_class(**self.image_processor_dict )
A_ = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase , obj[key] ) )
else:
self.assertEqual(obj[key] , UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ = os.path.join(UpperCAmelCase , "image_processor.json" )
image_processor_first.to_json_file(UpperCAmelCase )
A_ = self.image_processing_class.from_json_file(UpperCAmelCase ).to_dict()
A_ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase )
def __A ( self : Any ):
A_ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(UpperCAmelCase )
A_ = self.image_processing_class.from_pretrained(UpperCAmelCase ).to_dict()
A_ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCAmelCase , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCAmelCase )
@unittest.skip("ImageGPT requires clusters at initialization" )
def __A ( self : str ):
pass
def __snake_case ( ):
"""simple docstring"""
A_ = load_dataset("hf-internal-testing/fixtures_image_utils" ,split="test" )
A_ = Image.open(dataset[4]["file"] )
A_ = Image.open(dataset[5]["file"] )
A_ = [imagea, imagea]
return images
@require_vision
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Any ):
A_ = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
A_ = prepare_images()
# test non-batched
A_ = image_processing(images[0] , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
A_ = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCAmelCase )
# test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
A_ = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCAmelCase )
| 354 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
    def __A ( self : int ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer
def __A ( self : Optional[Any] , **UpperCAmelCase : int ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def __A ( self : Any ):
shutil.rmtree(self.tmpdirname )
    def __A ( self : Dict ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def __A ( self : Any ):
A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
A_ = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = self.prepare_image_inputs()
A_ = image_processor(UpperCAmelCase , return_tensors="np" )
A_ = processor(images=UpperCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self : int ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = processor(text=UpperCAmelCase )
A_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self : Tuple ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def __A ( self : Any ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ = processor.batch_decode(UpperCAmelCase )
A_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] ):
A_ = self.get_image_processor()
A_ = self.get_tokenizer()
A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
A_ = "lower newer"
A_ = self.prepare_image_inputs()
A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 329 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 355 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def __snake_case ( __UpperCamelCase : int = 20 ):
"""simple docstring"""
    total = math.comb(NUM_BALLS ,__UpperCamelCase )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,__UpperCamelCase )
    result = NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a :List[str] = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :str = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Dict = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Union[str, Any] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 356 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Optional[Any] = logging.get_logger(__name__)
__a :Any = {'vocab_file': 'vocab.txt'}
__a :Any = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__a :List[str] = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__a :List[str] = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Union[str, Any] = ConvBertTokenizer
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
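        # If the serialized normalizer options differ from the ones requested here, rebuild the
        # backend normalizer with the requested lowercasing/accent-stripping/CJK handling.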
if (
normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
):
A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
A_ = do_lower_case
A_ = strip_accents
A_ = tokenize_chinese_chars
A_ = normalizer_class(**UpperCAmelCase )
A_ = do_lower_case
    def __A ( self : List[str] , token_ids_0 : Any , token_ids_1 : Dict=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def __A ( self : Optional[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def __A ( self : Optional[Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 329 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config ,base_model=False ):
"""simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
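# The MSN checkpoint stores attention as a single fused qkv projection per block; the helper
# below splits it into the separate query/key/value weights the HF implementation expects.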
def read_in_q_k_v( state_dict ,config ,base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
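# Helper to drop a bare classification head ("head.weight"/"head.bias") from a state dict.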
def __snake_case ( state_dict ):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def remove_projection_head( state_dict ):
    """simple docstring"""
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
        state_dict.pop(k ,None )
def rename_key( dct ,old ,new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint( checkpoint_url ,pytorch_dump_folder_path ):
"""simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id ,filename ) ,"r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config ,base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_q_k_v(state_dict ,config ,base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url ,stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size ,image_mean=IMAGENET_DEFAULT_MEAN ,image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image ,return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
A_ = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
A_ = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
A_ = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
A_ = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
A_ = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] ,__UpperCamelCase ,atol=1E-4 )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__a :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a :Optional[int] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 357 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a :Optional[Any] = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead." , FutureWarning , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 329 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _a ( snake_case_ ):
"""simple docstring"""
@staticmethod
@abstractmethod
def __A ( UpperCAmelCase : ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def __A ( self : int ):
raise NotImplementedError()
| 358 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
"""simple docstring"""
@staticmethod
def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ):
pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __A ( self : List[str] ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@require_tf
def __A ( self : int ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __A ( self : Any ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __A ( self : Optional[Any] ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 329 | 0 |
__author__ = 'Alexander Joslin'
import operator as op
from .stack import Stack
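# Dijkstra's two-stack algorithm evaluates a fully parenthesised infix expression:
# operands are pushed on one stack and operators on another; every closing parenthesis
# pops one operator and two operands, applies the operator (operand order matters for
# "-" and "/"), and pushes the result back. Because `i.isdigit()` consumes a single
# character at a time, operands must be single digits.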
def dijkstras_two_stack_algorithm( equation : str ):
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
for i in equation:
if i.isdigit():
# RULE 1
            operand_stack.push(int(i ) )
elif i in operators:
# RULE 2
            operator_stack.push(i )
elif i == ")":
# RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2 ,num1 )
            operand_stack.push(total )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 359 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
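# Step the scheduler a fixed number of times and record the learning rate at each step.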
def unwrap_schedule( scheduler ,num_steps=10 ):
    """simple docstring"""
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
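# Same as the helper above, but snapshot the scheduler state with torch.save halfway through
# and reload it, to check that schedules survive (de)serialization.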
def unwrap_and_save_reload_schedule( scheduler ,num_steps=10 ):
    """simple docstring"""
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname ,"schedule.bin" )
                torch.save(scheduler.state_dict() ,file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def __A ( self : Dict ):
A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
A_ = torch.tensor([0.4, 0.2, -0.5] )
A_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , )
for _ in range(1000 ):
A_ = criterion(UpperCAmelCase , UpperCAmelCase )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
_lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
_lowerCamelCase : Any = 1_0
def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ):
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
A_ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler ) # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f'''failed for {scheduler_func} in save and reload''' )
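# Module-level wrapper applied to each of the scheduler's lr_lambdas above so that the schedule
# can be round-tripped through torch.save/torch.load in the save-and-reload check.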
class LambdaScheduleWrapper:
    """simple docstring"""
    def __init__( self : List[str] , fn : List[str] ):
        self.fn = fn
    def __call__( self : Union[str, Any] , *args : str , **kwargs : Optional[Any] ):
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( cls , scheduler : List[str] ):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 329 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Any = logging.get_logger(__name__)
__a :Optional[Any] = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = 'cvt'
def __init__( self : Tuple , UpperCAmelCase : int=3 , UpperCAmelCase : int=[7, 3, 3] , UpperCAmelCase : Optional[int]=[4, 2, 2] , UpperCAmelCase : Dict=[2, 1, 1] , UpperCAmelCase : Dict=[64, 192, 384] , UpperCAmelCase : Union[str, Any]=[1, 3, 6] , UpperCAmelCase : int=[1, 2, 10] , UpperCAmelCase : Union[str, Any]=[4.0, 4.0, 4.0] , UpperCAmelCase : Tuple=[0.0, 0.0, 0.0] , UpperCAmelCase : Any=[0.0, 0.0, 0.0] , UpperCAmelCase : Optional[Any]=[0.0, 0.0, 0.1] , UpperCAmelCase : Tuple=[True, True, True] , UpperCAmelCase : List[str]=[False, False, True] , UpperCAmelCase : str=["dw_bn", "dw_bn", "dw_bn"] , UpperCAmelCase : str=[3, 3, 3] , UpperCAmelCase : Dict=[1, 1, 1] , UpperCAmelCase : Optional[Any]=[2, 2, 2] , UpperCAmelCase : Optional[int]=[1, 1, 1] , UpperCAmelCase : Dict=[1, 1, 1] , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , **UpperCAmelCase : Dict , ):
super().__init__(**UpperCAmelCase )
A_ = num_channels
A_ = patch_sizes
A_ = patch_stride
A_ = patch_padding
A_ = embed_dim
A_ = num_heads
A_ = depth
A_ = mlp_ratio
A_ = attention_drop_rate
A_ = drop_rate
A_ = drop_path_rate
A_ = qkv_bias
A_ = cls_token
A_ = qkv_projection_method
A_ = kernel_qkv
A_ = padding_kv
A_ = stride_kv
A_ = padding_q
A_ = stride_q
A_ = initializer_range
A_ = layer_norm_eps
| 360 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __snake_case ( x ): # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __snake_case ( i ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : int
_lowerCamelCase : str
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Dict ):
A_ = {}
A_ = []
A_ = 1
A_ = [1, 2]
A_ = {"a": 1, "b": 2}
A_ = {"a": [1, 2], "b": [3, 4]}
A_ = {"a": {"1": 1}, "b": 2}
A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
A_ = {}
A_ = []
A_ = 2
A_ = [2, 3]
A_ = {"a": 2, "b": 3}
A_ = {"a": [2, 3], "b": [4, 5]}
A_ = {"a": {"1": 2}, "b": 3}
A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
A_ = 2
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A_ = {"a": 2, "b": 0, "c": 2}
A_ = {
"a": np.eye(2 ).astype(UpperCAmelCase ),
"b": np.zeros(3 ).astype(UpperCAmelCase ),
"c": np.ones(2 ).astype(UpperCAmelCase ),
}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCAmelCase ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase )
def __A ( self : List[str] ):
A_ = {"a": 1, "b": 2}
A_ = {"a": 3, "b": 4}
A_ = {"a": 5, "b": 6}
A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase )
def __A ( self : Any ):
class _a :
"""simple docstring"""
_lowerCamelCase : int = 'bar'
A_ = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
A_ = {f'''{i}''': i for i in range(__UpperCamelCase )}
        A_ = map_nested(lambda x : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _a ( snake_case_ ):
"""simple docstring"""
@require_tf
def __A ( self : Union[str, Any] ):
import tensorflow as tf
from tensorflow.keras import layers
A_ = layers.Dense(2 )
def gen_random_output():
A_ = tf.random.uniform((1, 3) )
return model(UpperCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __A ( self : Optional[int] ):
import torch
def gen_random_output():
A_ = torch.nn.Linear(3 , 2 )
A_ = torch.rand(1 , 3 )
return model(UpperCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __A ( self : Any ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A_ = gen_random_output()
with temp_seed(42 ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" ,[{}] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" ,[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] ,)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).flatten()
assert output == expected_output
def __snake_case ( ):
"""simple docstring"""
A_ = A(x=1 ,y="foobar" )
A_ = {"x": 1, "y": "foobar"}
assert asdict(__UpperCamelCase ) == expected_output
A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]}
A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(__UpperCamelCase ) == expected_output
with pytest.raises(__UpperCamelCase ):
asdict([1, A(x=10 ,y="foo" )] )
def _split_text ( text : str ):
    """simple docstring"""
    return text.split()
def _aseconds_generator_of_aitems_with_timing ( content ):
    """simple docstring"""
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
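# iflatmap_unordered fans the kwargs out over the pool, flattens each worker's generator
# output, and yields results in completion order rather than submission order.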
def __snake_case ( ):
    """simple docstring"""
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
| 329 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__a :str = logging.get_logger(__name__) # pylint: disable=invalid-name
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : WhisperForConditionalGeneration , UpperCAmelCase : WhisperProcessor , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : CLIPTextModel , UpperCAmelCase : CLIPTokenizer , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase : StableDiffusionSafetyChecker , UpperCAmelCase : CLIPImageProcessor , ):
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=UpperCAmelCase , speech_processor=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase , feature_extractor=UpperCAmelCase , )
def __A ( self : Optional[Any] , UpperCAmelCase : Optional[Union[str, int]] = "auto" ):
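        # "auto" slices attention over half the head dimension, trading some speed for lower memory.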
if slice_size == "auto":
A_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase )
def __A ( self : List[str] ):
self.enable_attention_slicing(UpperCAmelCase )
@torch.no_grad()
def __call__( self : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=16000 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , **UpperCAmelCase : int , ):
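        # First transcribe the raw audio with Whisper; the decoded text is used as the diffusion prompt.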
A_ = self.speech_processor.feature_extractor(
UpperCAmelCase , return_tensors="pt" , sampling_rate=UpperCAmelCase ).input_features.to(self.device )
A_ = self.speech_model.generate(UpperCAmelCase , max_length=480000 )
A_ = self.speech_processor.tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase , normalize=UpperCAmelCase )[
0
]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = 1
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = len(UpperCAmelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase , UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(UpperCAmelCase )}.''' )
# get prompt text embeddings
A_ = self.tokenizer(
UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
A_ = text_input_ids[:, : self.tokenizer.model_max_length]
A_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A_ , A_ , A_ = text_embeddings.shape
A_ = text_embeddings.repeat(1 , UpperCAmelCase , 1 )
A_ = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A_ = 42
if negative_prompt is None:
A_ = [""] * batch_size
elif type(UpperCAmelCase ) is not type(UpperCAmelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase )} !='''
f''' {type(UpperCAmelCase )}.''' )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = [negative_prompt]
elif batch_size != len(UpperCAmelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
A_ = negative_prompt
A_ = text_input_ids.shape[-1]
A_ = self.tokenizer(
UpperCAmelCase , padding="max_length" , max_length=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors="pt" , )
A_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A_ = uncond_embeddings.shape[1]
A_ = uncond_embeddings.repeat(1 , UpperCAmelCase , 1 )
A_ = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A_ = torch.randn(UpperCAmelCase , generator=UpperCAmelCase , device="cpu" , dtype=UpperCAmelCase ).to(
self.device )
else:
A_ = torch.randn(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
A_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A_ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ = {}
if accepts_eta:
A_ = eta
for i, t in enumerate(self.progress_bar(UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
A_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
# predict the noise residual
A_ = self.unet(UpperCAmelCase , UpperCAmelCase , encoder_hidden_states=UpperCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
A_ , A_ = noise_pred.chunk(2 )
A_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = 1 / 0.18_215 * latents
A_ = self.vae.decode(UpperCAmelCase ).sample
A_ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=UpperCAmelCase , nsfw_content_detected=UpperCAmelCase )
| 361 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
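# The codepoint ranges below cover the CJK Unified Ideographs blocks (the base block,
# extensions A-E, and the compatibility ideograph blocks), i.e. what counts as a
# "Chinese character" for this script.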
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2A6DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2A700 and cp <= 0x2B73F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2B740 and cp <= 0x2B81F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  # CJK Compatibility Ideographs Supplement
    ):
return True
return False
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
for char in word:
A_ = ord(__UpperCamelCase )
if not _is_chinese_char(__UpperCamelCase ):
return 0
return 1
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = set()
for token in tokens:
A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase )
if chinese_word:
word_set.add(__UpperCamelCase )
A_ = list(__UpperCamelCase )
return word_list
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] )
A_ = bert_tokens
A_ , A_ = 0, len(__UpperCamelCase )
while start < end:
A_ = True
if is_chinese(bert_word[start] ):
A_ = min(end - start ,__UpperCamelCase )
for i in range(__UpperCamelCase ,1 ,-1 ):
A_ = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 ,start + i ):
A_ = "##" + bert_word[j]
A_ = start + i
A_ = False
break
if single_word:
start += 1
return bert_word
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ):
"""simple docstring"""
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0]
A_ = [get_chinese_word(__UpperCamelCase ) for r in res]
ltp_res.extend(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for i in range(0 ,len(__UpperCamelCase ) ,100 ):
A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = []
for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ):
A_ = []
for id in input_ids:
A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase )
input_tokens.append(__UpperCamelCase )
A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase )
A_ = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(__UpperCamelCase ):
if token[:2] == "##":
A_ = token[2:]
# save chinese tokens' pos
if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ):
ref_id.append(__UpperCamelCase )
ref_ids.append(__UpperCamelCase )
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
return ref_ids
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ = LTP(args.ltp ) # faster in GPU device
A_ = BertTokenizer.from_pretrained(args.bert )
A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids]
f.writelines(__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__a :Dict = parser.parse_args()
main(args)
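# --- Hedged example appended for illustration (not part of the original script) ---
# Shows what the sub-symbol helper produces: BERT sub-tokens that continue a whole
# Chinese word found by LTP get a "##" prefix so whole-word masking can group them.
# The tokens and word set below are hand-picked assumptions, and the call assumes
# the helper keeps the name add_sub_symbol used at its call site above.
def _demo_add_sub_symbol():
    demo_bert_tokens = ["我", "喜", "欢", "北", "京"]
    demo_chinese_words = {"喜欢", "北京"}
    result = add_sub_symbol(demo_bert_tokens, demo_chinese_words)
    assert result == ["我", "喜", "##欢", "北", "##京"]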
| 329 | 0 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
A_ = s_dict.pop(__UpperCamelCase )
elif "subsample" in key:
A_ = s_dict.pop(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : List[str] ):
"""simple docstring"""
A_ , A_ = emb.weight.shape
A_ = nn.Linear(__UpperCamelCase ,__UpperCamelCase ,bias=__UpperCamelCase )
A_ = emb.weight.data
return lin_layer
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = torch.load(__UpperCamelCase ,map_location="cpu" )
A_ = mam_aaa["args"]
A_ = mam_aaa["model"]
A_ = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(__UpperCamelCase )
rename_keys(__UpperCamelCase )
A_ = state_dict["decoder.embed_tokens.weight"].shape[0]
A_ = args.share_decoder_input_output_embed
A_ = [int(__UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )]
A_ = SpeechaTextConfig(
vocab_size=__UpperCamelCase ,max_source_positions=args.max_source_positions ,max_target_positions=args.max_target_positions ,encoder_layers=args.encoder_layers ,decoder_layers=args.decoder_layers ,encoder_attention_heads=args.encoder_attention_heads ,decoder_attention_heads=args.decoder_attention_heads ,encoder_ffn_dim=args.encoder_ffn_embed_dim ,decoder_ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.encoder_embed_dim ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="relu" ,num_conv_layers=len(__UpperCamelCase ) ,conv_channels=args.conv_channels ,conv_kernel_sizes=__UpperCamelCase ,input_feat_per_channel=args.input_feat_per_channel ,input_channels=args.input_channels ,tie_word_embeddings=__UpperCamelCase ,num_beams=5 ,max_length=200 ,use_cache=__UpperCamelCase ,decoder_start_token_id=2 ,early_stopping=__UpperCamelCase ,)
A_ = SpeechaTextForConditionalGeneration(__UpperCamelCase )
A_ , A_ = model.model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase )
if len(__UpperCamelCase ) > 0 and not set(__UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
f''' but all the following weights are missing {missing}''' )
if tie_embeds:
A_ = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A_ = lm_head_weights
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__a :Dict = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
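# --- Hedged illustration appended for clarity (not part of the conversion script) ---
# Demonstrates the weight-tying trick applied when tie_embeds is set above: the LM
# head reuses the decoder embedding matrix, so logits = hidden_states @ E^T.
# The vocabulary size and model dimension below are arbitrary assumptions.
def _demo_tied_lm_head():
    import torch
    from torch import nn

    emb = nn.Embedding(100, 16)  # vocab_size=100, d_model=16 (assumed)
    lm_head = nn.Linear(16, 100, bias=False)
    lm_head.weight.data = emb.weight.data  # tie the weights, as in the helper above
    hidden = torch.randn(2, 5, 16)
    assert torch.allclose(lm_head(hidden), hidden @ emb.weight.T)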
| 362 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __snake_case ( __UpperCamelCase : Features ):
"""simple docstring"""
A_ = np.inf
def set_batch_size(__UpperCamelCase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary":
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__UpperCamelCase ,__UpperCamelCase )
return None if batch_size is np.inf else batch_size
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ):
super().__init__(
UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , )
A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES["parquet"][1]
A_ = Parquet(
cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , )
def __A ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class _a :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ):
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self : int ):
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
return written
def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ):
A_ = 0
A_ = parquet_writer_kwargs.pop("path_or_buf" , UpperCAmelCase )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
A_ = query_table(
table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCAmelCase )
written += batch.nbytes
writer.close()
return written
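# --- Hedged usage sketch appended for illustration (not part of the module) ---
# The reader/writer pair above backs the public datasets API; the file name and
# column contents below are assumptions.
def _demo_parquet_roundtrip():
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
    ds.to_parquet("demo.parquet")  # drives the Parquet writer defined above
    reloaded = Dataset.from_parquet("demo.parquet")  # drives the reader
    assert reloaded.num_rows == 2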
| 329 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str]=7 , UpperCAmelCase : str=3 , UpperCAmelCase : List[Any]=18 , UpperCAmelCase : Optional[Any]=30 , UpperCAmelCase : str=400 , UpperCAmelCase : Any=True , UpperCAmelCase : int=None , UpperCAmelCase : Tuple=True , UpperCAmelCase : str=None , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Tuple=[0.5, 0.5, 0.5] , UpperCAmelCase : Dict=[0.5, 0.5, 0.5] , UpperCAmelCase : Optional[int]=False , ):
A_ = size if size is not None else {"height": 20, "width": 20}
A_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = image_size
A_ = min_resolution
A_ = max_resolution
A_ = do_resize
A_ = size
A_ = do_center_crop
A_ = crop_size
A_ = do_normalize
A_ = image_mean
A_ = image_std
A_ = do_reduce_labels
def __A ( self : Any ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __snake_case ( ):
"""simple docstring"""
A_ = load_dataset("hf-internal-testing/fixtures_ade20k" ,split="test" )
A_ = Image.open(dataset[0]["file"] )
A_ = Image.open(dataset[1]["file"] )
return image, map
def __snake_case ( ):
"""simple docstring"""
A_ = load_dataset("hf-internal-testing/fixtures_ade20k" ,split="test" )
A_ = Image.open(ds[0]["file"] )
A_ = Image.open(ds[1]["file"] )
A_ = Image.open(ds[2]["file"] )
A_ = Image.open(ds[3]["file"] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Any = BeitImageProcessor if is_vision_available() else None
def __A ( self : Union[str, Any] ):
A_ = BeitImageProcessingTester(self )
@property
def __A ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Any ):
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase , "center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase , "image_std" ) )
def __A ( self : Any ):
A_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase )
A_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=UpperCAmelCase )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase )
def __A ( self : List[str] ):
pass
def __A ( self : int ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __A ( self : Optional[int] ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __A ( self : Optional[int] ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __A ( self : Any ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
A_ = []
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
A_ = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched
A_ = image_processing(UpperCAmelCase , UpperCAmelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test not batched input (PIL images)
A_ , A_ = prepare_semantic_single_inputs()
A_ = image_processing(UpperCAmelCase , UpperCAmelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched input (PIL images)
A_ , A_ = prepare_semantic_batch_inputs()
A_ = image_processing(UpperCAmelCase , UpperCAmelCase , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
def __A ( self : int ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
A_ , A_ = prepare_semantic_single_inputs()
A_ = image_processing(UpperCAmelCase , UpperCAmelCase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 150 )
A_ = True
A_ = image_processing(UpperCAmelCase , UpperCAmelCase , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
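# --- Hedged usage sketch appended for illustration (not part of the test suite) ---
# Mirrors what the tests above exercise; the sizes and the blank dummy image are
# assumptions.
def _demo_beit_image_processor():
    import numpy as np
    from transformers import BeitImageProcessor

    processor = BeitImageProcessor(
        size={"height": 20, "width": 20}, crop_size={"height": 18, "width": 18}
    )
    image = np.zeros((30, 30, 3), dtype=np.uint8)  # a blank HWC image
    pixel_values = processor(image, return_tensors="pt").pixel_values
    assert pixel_values.shape == (1, 3, 18, 18)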
| 363 |
from __future__ import annotations
def __snake_case ( __UpperCamelCase : int = 4 ):
"""simple docstring"""
A_ = abs(__UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )]
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(reverse_column(__UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_column(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = matrix[::-1]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [x[::-1] for x in matrix]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
for i in matrix:
print(*__UpperCamelCase )
if __name__ == "__main__":
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
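# --- Hedged sanity check appended for illustration ---
# Four successive 90-degree rotations should reproduce the original matrix; the
# function names follow the calls in the __main__ block above.
def _demo_rotation_roundtrip():
    rotated = make_matrix(2)  # [[1, 2], [3, 4]]
    for _ in range(4):
        rotated = rotate_aa(rotated)
    assert rotated == make_matrix(2)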
| 329 | 0 |
from __future__ import annotations
from typing import Any
class _a ( snake_case_ ):
"""simple docstring"""
pass
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : Any ):
A_ = data
A_ = None
def __iter__( self : Optional[Any] ):
A_ = self
A_ = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCAmelCase )
yield node.data
A_ = node.next_node
@property
def __A ( self : int ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
__a :Union[str, Any] = Node(1)
__a :Optional[Any] = Node(2)
__a :List[str] = Node(3)
__a :Dict = Node(4)
print(root_node.has_loop) # False
__a :Optional[int] = root_node.next_node
print(root_node.has_loop) # True
__a :int = Node(5)
__a :Any = Node(6)
__a :Optional[int] = Node(5)
__a :Union[str, Any] = Node(6)
print(root_node.has_loop) # False
__a :Dict = Node(1)
print(root_node.has_loop) # False
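# --- Hedged alternative sketch appended for illustration (not in the original) ---
# The has_loop property above records every visited node, which costs O(n) extra
# memory. Floyd's tortoise-and-hare detection performs the same check in O(1)
# memory; the Node type hint follows the usage in the __main__ block above.
def _has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False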
| 364 |
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Any = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 329 | 0 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
"""simple docstring"""
@staticmethod
def __A ( *UpperCAmelCase : Tuple , **UpperCAmelCase : Any ):
pass
@is_pipeline_test
@require_torch
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def __A ( self : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] ):
A_ = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
A_ = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def __A ( self : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Dict ):
A_ = vqa_pipeline(UpperCAmelCase , top_k=1 )
self.assertEqual(
UpperCAmelCase , [
[{"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}],
[{"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}],
] , )
@require_torch
def __A ( self : Union[str, Any] ):
A_ = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
A_ = "./tests/fixtures/tests_samples/COCO/000000039769.png"
A_ = "How many cats are there?"
A_ = vqa_pipeline(image=UpperCAmelCase , question="How many cats are there?" , top_k=2 )
self.assertEqual(
UpperCAmelCase , [{"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}, {"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}] )
A_ = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
UpperCAmelCase , [{"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}, {"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}] )
@slow
@require_torch
def __A ( self : Optional[Any] ):
A_ = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
A_ = "./tests/fixtures/tests_samples/COCO/000000039769.png"
A_ = "How many cats are there?"
A_ = vqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
A_ = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
A_ = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [[{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def __A ( self : Tuple ):
pass
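# --- Hedged usage sketch appended for illustration (not part of the test suite) ---
# The model id mirrors the slow integration test above; the image path and the
# shape of the printed output are assumptions.
def _demo_vqa_pipeline():
    from transformers import pipeline

    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    answers = vqa(
        image="./tests/fixtures/tests_samples/COCO/000000039769.png",
        question="How many cats are there?",
        top_k=2,
    )
    print(answers)  # e.g. [{"score": ..., "answer": "2"}, {"score": ..., "answer": "1"}]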
| 365 |
import itertools
import math
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negative numbers, 0, 1, all even numbers, and all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k ± 1
for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __snake_case ( ):
"""simple docstring"""
A_ = 2
while True:
if is_prime(__UpperCamelCase ):
yield num
num += 1
def __snake_case ( __UpperCamelCase : int = 1_0001 ):
"""simple docstring"""
return next(itertools.islice(prime_generator() ,nth - 1 ,__UpperCamelCase ) )
if __name__ == "__main__":
print(F"{solution() = }")
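# --- Hedged example appended for illustration ---
# Small sanity checks for the generator-based search above; the expected values
# follow directly from the definition (the 1st prime is 2, the 6th is 13).
def _demo_prime_solution():
    assert solution(1) == 2
    assert solution(6) == 13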
| 329 | 0 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ):
return False
return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ):
"""simple docstring"""
A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
A_ = is_compiled_module(__UpperCamelCase )
if is_compiled:
A_ = model
A_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = model.module
if not keep_fpaa_wrapper:
A_ = getattr(__UpperCamelCase ,"forward" )
A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase )
if original_forward is not None:
while hasattr(__UpperCamelCase ,"__wrapped__" ):
A_ = forward.__wrapped__
if forward == original_forward:
break
A_ = forward
if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ):
convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase )
if is_compiled:
A_ = model
A_ = compiled_model
return model
def __snake_case ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__UpperCamelCase ,__UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(__UpperCamelCase ,__UpperCamelCase )
@contextmanager
def __snake_case ( **__UpperCamelCase : Any ):
"""simple docstring"""
for key, value in kwargs.items():
A_ = str(__UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ):
A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase )
if hasattr(__UpperCamelCase ,"__qualname__" ):
return obj.__qualname__
if hasattr(__UpperCamelCase ,"__name__" ):
return obj.__name__
return str(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
for key, value in source.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = destination.setdefault(__UpperCamelCase ,{} )
merge_dicts(__UpperCamelCase ,__UpperCamelCase )
else:
A_ = value
return destination
def __snake_case ( __UpperCamelCase : int = None ):
"""simple docstring"""
if port is None:
A_ = 2_9500
with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
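# --- Hedged usage sketch appended for illustration (not part of the module) ---
# The context manager above sets environment variables for the duration of a
# with-block and removes them afterwards. This assumes it keeps its upstream
# name, patch_environment, and upper-cases keys, as the cleanup loop suggests.
def _demo_patch_environment():
    with patch_environment(master_port="29501"):
        assert os.environ["MASTER_PORT"] == "29501"
    assert "MASTER_PORT" not in os.environ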
| 366 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 384
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = 128
A_ = 2
A_ = 9
A_ = 1
A_ = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
A_ = self.prepare_config_and_inputs()
        (
            A_,
            A_,
            A_,
            A_,
            A_,
            A_,
            A_,
        ) = config_and_inputs
A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
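# --- Hedged usage sketch appended for illustration (mirrors the integration test above) ---
def _demo_convbert_inference():
    import tensorflow as tf
    from transformers import TFConvBertModel

    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
    hidden_states = model(input_ids)[0]
    assert hidden_states.shape == (1, 6, 768)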
| 329 | 0 |
def __snake_case ( __UpperCamelCase : list[list[float]] ):
"""simple docstring"""
A_ = []
for data in source_data:
for i, el in enumerate(__UpperCamelCase ):
if len(__UpperCamelCase ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(__UpperCamelCase ) )
return data_lists
def __snake_case ( __UpperCamelCase : list[list[float]] ,__UpperCamelCase : list[int] ):
"""simple docstring"""
A_ = []
for dlist, weight in zip(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase )
A_ = max(__UpperCamelCase )
A_ = []
        # for weight 0, the score is 1 - the normalized value (lower raw values score higher)
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
A_ = f'''Invalid weight of {weight:f} provided'''
raise ValueError(__UpperCamelCase )
score_lists.append(__UpperCamelCase )
return score_lists
def __snake_case ( __UpperCamelCase : list[list[float]] ):
"""simple docstring"""
A_ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(__UpperCamelCase ):
A_ = final_scores[j] + ele
return final_scores
def __snake_case ( __UpperCamelCase : list[list[float]] ,__UpperCamelCase : list[int] ):
"""simple docstring"""
A_ = get_data(__UpperCamelCase )
A_ = calculate_each_score(__UpperCamelCase ,__UpperCamelCase )
A_ = generate_final_scores(__UpperCamelCase )
# append scores to source data
for i, ele in enumerate(__UpperCamelCase ):
source_data[i].append(__UpperCamelCase )
return source_data
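# --- Hedged example appended for illustration ---
# Weight 0 rewards low values and weight 1 rewards high values; the sample data
# is an assumption, and the call assumes the top-level function keeps its
# upstream name, procentual_proximity.
def _demo_weighted_scoring():
    source = [[20, 60], [25, 90], [30, 75]]
    scored = procentual_proximity(source, [0, 1])
    assert all(len(row) == 3 for row in scored)  # each row gains its final score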
| 367 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Dict = logging.get_logger(__name__)
__a :int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'realm'
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
# Common config
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = retriever_proj_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_candidates
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = type_vocab_size
A_ = layer_norm_eps
# Reader config
A_ = span_hidden_size
A_ = max_span_width
A_ = reader_layer_norm_eps
A_ = reader_beam_size
A_ = reader_seq_len
# Retrieval config
A_ = num_block_records
A_ = searcher_beam_size
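# --- Hedged usage sketch appended for illustration (not part of the module) ---
# Instantiates a deliberately small configuration; the sizes below are assumptions.
def _demo_realm_config():
    from transformers import RealmConfig

    config = RealmConfig(
        num_hidden_layers=2, num_attention_heads=2, hidden_size=128, intermediate_size=256
    )
    assert config.model_type == "realm"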
| 329 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a :Optional[int] = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Optional[int]=False ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : int=False ):
"""simple docstring"""
A_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''transformer.blocks.{i}.norm1.weight''', f'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm1.bias''', f'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.weight''', f'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''transformer.blocks.{i}.attn.proj.bias''', f'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.weight''', f'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.norm2.bias''', f'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''transformer.blocks.{i}.mlp.fc1.weight''', f'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc1.bias''', f'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.weight''', f'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''transformer.blocks.{i}.mlp.fc2.bias''', f'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
A_ = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''' )
A_ = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A_ = in_proj_weight[
: config.hidden_size, :
]
A_ = in_proj_bias[: config.hidden_size]
A_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ = in_proj_weight[
-config.hidden_size :, :
]
A_ = in_proj_bias[-config.hidden_size :]
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ):
"""simple docstring"""
A_ = dct.pop(__UpperCamelCase )
A_ = val
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = ViltConfig(image_size=384 ,patch_size=32 ,tie_word_embeddings=__UpperCamelCase )
A_ = False
A_ = False
A_ = False
A_ = False
if "vqa" in checkpoint_url:
A_ = True
A_ = 3129
A_ = "huggingface/label-files"
A_ = "vqa2-id2label.json"
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
A_ = ViltForQuestionAnswering(__UpperCamelCase )
elif "nlvr" in checkpoint_url:
A_ = True
A_ = 2
A_ = {0: "False", 1: "True"}
A_ = {v: k for k, v in config.idalabel.items()}
A_ = 3
A_ = ViltForImagesAndTextClassification(__UpperCamelCase )
elif "irtr" in checkpoint_url:
A_ = True
A_ = ViltForImageAndTextRetrieval(__UpperCamelCase )
elif "mlm_itm" in checkpoint_url:
A_ = True
A_ = ViltForMaskedLM(__UpperCamelCase )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
A_ = torch.hub.load_state_dict_from_url(__UpperCamelCase ,map_location="cpu" )["state_dict"]
A_ = create_rename_keys(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase )
if mlm_model or irtr_model:
A_ = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
A_ , A_ = model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__UpperCamelCase )
# Define processor
A_ = ViltImageProcessor(size=384 )
A_ = BertTokenizer.from_pretrained("bert-base-uncased" )
A_ = ViltProcessor(__UpperCamelCase ,__UpperCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
A_ = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" ,stream=__UpperCamelCase ).raw )
A_ = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" ,stream=__UpperCamelCase ).raw )
A_ = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
A_ = processor(__UpperCamelCase ,__UpperCamelCase ,return_tensors="pt" )
A_ = processor(__UpperCamelCase ,__UpperCamelCase ,return_tensors="pt" )
A_ = model(
input_ids=encoding_a.input_ids ,pixel_values=encoding_a.pixel_values ,pixel_values_a=encoding_a.pixel_values ,)
else:
A_ = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" ,stream=__UpperCamelCase ).raw )
if mlm_model:
A_ = "a bunch of [MASK] laying on a [MASK]."
else:
A_ = "How many cats are there?"
A_ = processor(__UpperCamelCase ,__UpperCamelCase ,return_tensors="pt" )
A_ = model(**__UpperCamelCase )
# Verify outputs
if mlm_model:
A_ = torch.Size([1, 11, 3_0522] )
A_ = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] ,__UpperCamelCase ,atol=1E-4 )
# verify masked token prediction equals "cats"
A_ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
A_ = torch.Size([1, 3129] )
A_ = torch.tensor([-15.9495, -18.1472, -10.3041] )
assert torch.allclose(outputs.logits[0, :3] ,__UpperCamelCase ,atol=1E-4 )
assert outputs.logits.shape == expected_shape
# verify vqa prediction equals "2"
A_ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
A_ = torch.Size([1, 2] )
A_ = torch.tensor([-2.8721, 2.1291] )
assert torch.allclose(outputs.logits[0, :3] ,__UpperCamelCase ,atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a :Any = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
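# ---------------------------------------------------------------------------
# Minimal self-contained sketch (not part of the original conversion script)
# of the pop-and-reassign pattern that rename_key applies to the checkpoint:
# a tensor is moved under its new key while the rest of the dict is untouched.
# The toy keys and values below are hypothetical.
# ---------------------------------------------------------------------------
from collections import OrderedDict

toy_state_dict = OrderedDict(
    [("transformer.norm.weight", [1.0, 1.0]), ("pooler.dense.bias", [0.0, 0.0])]
)
for src, dest in [("transformer.norm.weight", "vilt.layernorm.weight")]:
    toy_state_dict[dest] = toy_state_dict.pop(src)  # same move rename_key performs
assert "vilt.layernorm.weight" in toy_state_dict
assert "transformer.norm.weight" not in toy_state_dict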
| 368 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a :Optional[Any] = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = original_name.split("." )[0]
A_ = key.split("." )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] )
A_ = orig_block_num - offset
A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = OrderedDict()
A_ , A_ = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
A_ = key.replace("network" ,"poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
A_ = key[: key.find("proj" )]
A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' )
A_ = key.replace("proj" ,"projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
A_ = "poolformer.encoder." + key
if "mlp.fc1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" )
if "mlp.fc2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" )
if "norm1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" )
if "norm2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" )
if "layer_scale_1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" )
if "layer_scale_2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" )
if "head" in key:
A_ = key.replace("head" ,"classifier" )
A_ = value
return new_state_dict
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return image
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = PoolFormerConfig()
# set attributes based on model_name
A_ = "huggingface/label-files"
A_ = model_name[-3:]
A_ = 1000
A_ = "imagenet-1k-id2label.json"
A_ = (1, 1000)
# set config attributes
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
if size == "s12":
A_ = [2, 2, 6, 2]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s24":
A_ = [4, 4, 12, 4]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s36":
A_ = [6, 6, 18, 6]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 1E-6
A_ = 0.9
elif size == "m36":
A_ = [6, 6, 18, 6]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
elif size == "m48":
A_ = [8, 8, 24, 8]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
# Prepare image
A_ = prepare_img()
A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) )
# rename keys
A_ = rename_keys(__UpperCamelCase )
# create HuggingFace model and load state dict
A_ = PoolFormerForImageClassification(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Define image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values
# forward pass
A_ = model(__UpperCamelCase )
A_ = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
A_ = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
A_ = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
A_ = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
A_ = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__a :int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
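# ---------------------------------------------------------------------------
# Self-contained sketch (toy key assumed, not from a real checkpoint) of the
# offset arithmetic performed by the rename helper above: the original keys
# number blocks globally across stages, while the converted keys restart
# numbering per stage, so the stage offset is subtracted from the block index.
# ---------------------------------------------------------------------------
def demo_replace_key_with_offset(key: str, offset: int, original_name: str, new_name: str) -> str:
    parts = key.split(".")
    anchor = original_name.split(".")[0]
    orig_block_num = int(parts[parts.index(anchor) - 2])
    layer_num = int(parts[parts.index(anchor) - 1])
    new_block_num = orig_block_num - offset
    return key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}",
        f"block.{new_block_num}.{layer_num}.{new_name}",
    )

# hypothetical key: with stage offset 2, global block 3 becomes per-stage block 1
assert (
    demo_replace_key_with_offset("poolformer.encoder.3.0.mlp.fc1.weight", 2, "mlp.fc1", "output.conv1")
    == "poolformer.encoder.block.1.0.output.conv1.weight"
)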
| 329 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[str] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Tuple=True ,__UpperCamelCase : Tuple="pt" ):
"""simple docstring"""
A_ = {"add_prefix_space": True} if isinstance(__UpperCamelCase ,__UpperCamelCase ) and not line.startswith(" " ) else {}
A_ = padding_side
return tokenizer(
[line] ,max_length=__UpperCamelCase ,padding="max_length" if pad_to_max_length else None ,truncation=__UpperCamelCase ,return_tensors=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,**__UpperCamelCase ,)
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[int]=None ,):
"""simple docstring"""
A_ = input_ids.ne(__UpperCamelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any]="train" , UpperCAmelCase : Any=None , UpperCAmelCase : str=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict="" , ):
super().__init__()
A_ = Path(UpperCAmelCase ).joinpath(type_path + ".source" )
A_ = Path(UpperCAmelCase ).joinpath(type_path + ".target" )
A_ = self.get_char_lens(self.src_file )
A_ = max_source_length
A_ = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
A_ = tokenizer
A_ = prefix
if n_obs is not None:
A_ = self.src_lens[:n_obs]
A_ = src_lang
A_ = tgt_lang
def __len__( self : Union[str, Any] ):
return len(self.src_lens )
def __getitem__( self : Optional[int] , UpperCAmelCase : List[str] ):
A_ = index + 1 # linecache starts at 1
A_ = self.prefix + linecache.getline(str(self.src_file ) , UpperCAmelCase ).rstrip("\n" )
A_ = linecache.getline(str(self.tgt_file ) , UpperCAmelCase ).rstrip("\n" )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , UpperCAmelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , UpperCAmelCase ) else self.tokenizer
)
A_ = self.tokenizer.generator if isinstance(self.tokenizer , UpperCAmelCase ) else self.tokenizer
A_ = encode_line(UpperCAmelCase , UpperCAmelCase , self.max_source_length , "right" )
A_ = encode_line(UpperCAmelCase , UpperCAmelCase , self.max_target_length , "right" )
A_ = source_inputs["input_ids"].squeeze()
A_ = target_inputs["input_ids"].squeeze()
A_ = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __A ( UpperCAmelCase : Dict ):
return [len(UpperCAmelCase ) for x in Path(UpperCAmelCase ).open().readlines()]
def __A ( self : Optional[Any] , UpperCAmelCase : Optional[Any] ):
A_ = torch.stack([x["input_ids"] for x in batch] )
A_ = torch.stack([x["attention_mask"] for x in batch] )
A_ = torch.stack([x["decoder_input_ids"] for x in batch] )
A_ = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , UpperCAmelCase )
else self.tokenizer.pad_token_id
)
A_ = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , UpperCAmelCase )
else self.tokenizer.pad_token_id
)
A_ = trim_batch(UpperCAmelCase , UpperCAmelCase )
A_ , A_ = trim_batch(UpperCAmelCase , UpperCAmelCase , attention_mask=UpperCAmelCase )
A_ = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
__a :int = getLogger(__name__)
def __snake_case ( __UpperCamelCase : List[List] ):
"""simple docstring"""
return list(itertools.chain.from_iterable(__UpperCamelCase ) )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = get_git_info()
save_json(__UpperCamelCase ,os.path.join(__UpperCamelCase ,"git_log.json" ) )
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Tuple=4 ,**__UpperCamelCase : int ):
"""simple docstring"""
with open(__UpperCamelCase ,"w" ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase ,indent=__UpperCamelCase ,**__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
with open(__UpperCamelCase ) as f:
return json.load(__UpperCamelCase )
def __snake_case ( ):
"""simple docstring"""
A_ = git.Repo(search_parent_directories=__UpperCamelCase )
A_ = {
"repo_id": str(__UpperCamelCase ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def __snake_case ( __UpperCamelCase : Callable ,__UpperCamelCase : Iterable ):
"""simple docstring"""
return list(map(__UpperCamelCase ,__UpperCamelCase ) )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Dict ):
"""simple docstring"""
with open(__UpperCamelCase ,"wb" ) as f:
return pickle.dump(__UpperCamelCase ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
def remove_articles(__UpperCamelCase : Optional[int] ):
return re.sub(R"\b(a|an|the)\b" ," " ,__UpperCamelCase )
def white_space_fix(__UpperCamelCase : List[Any] ):
return " ".join(text.split() )
def remove_punc(__UpperCamelCase : Tuple ):
A_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__UpperCamelCase : Union[str, Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) )
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = normalize_answer(__UpperCamelCase ).split()
A_ = normalize_answer(__UpperCamelCase ).split()
A_ = Counter(__UpperCamelCase ) & Counter(__UpperCamelCase )
A_ = sum(common.values() )
if num_same == 0:
return 0
A_ = 1.0 * num_same / len(__UpperCamelCase )
A_ = 1.0 * num_same / len(__UpperCamelCase )
A_ = (2 * precision * recall) / (precision + recall)
return fa
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ):
"""simple docstring"""
return normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str] ):
"""simple docstring"""
assert len(__UpperCamelCase ) == len(__UpperCamelCase )
A_ = 0
for hypo, pred in zip(__UpperCamelCase ,__UpperCamelCase ):
em += exact_match_score(__UpperCamelCase ,__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
em /= len(__UpperCamelCase )
return {"em": em}
def __snake_case ( __UpperCamelCase : Dict ):
"""simple docstring"""
return model_prefix.startswith("rag" )
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ = "dropout_rate"
for p in extra_params:
if getattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
if not hasattr(__UpperCamelCase ,__UpperCamelCase ) and not hasattr(__UpperCamelCase ,equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(__UpperCamelCase ) )
delattr(__UpperCamelCase ,__UpperCamelCase )
continue
A_ = p if hasattr(__UpperCamelCase ,__UpperCamelCase ) else equivalent_param[p]
setattr(__UpperCamelCase ,__UpperCamelCase ,getattr(__UpperCamelCase ,__UpperCamelCase ) )
delattr(__UpperCamelCase ,__UpperCamelCase )
return hparams, config
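# ---------------------------------------------------------------------------
# Self-contained worked example (illustrative strings only) of the token-level
# F1 computed above: after normalization strips articles, punctuation and
# case, precision and recall come from the multiset overlap of tokens.
# ---------------------------------------------------------------------------
from collections import Counter

pred_tokens = "the cat sat".lower().replace("the ", "").split()   # -> ["cat", "sat"]
gold_tokens = "a cat slept".lower().replace("a ", "").split()     # -> ["cat", "slept"]
num_same = sum((Counter(pred_tokens) & Counter(gold_tokens)).values())  # 1 shared token
precision = num_same / len(pred_tokens)   # 0.5
recall = num_same / len(gold_tokens)      # 0.5
f1 = 2 * precision * recall / (precision + recall)
assert abs(f1 - 0.5) < 1e-9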
| 369 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : torch.FloatTensor
_lowerCamelCase : Optional[torch.FloatTensor] = None
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase : int ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
A_ = []
for i in range(__UpperCamelCase ):
A_ = i / num_diffusion_timesteps
A_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) )
return torch.tensor(__UpperCamelCase ,dtype=torch.floataa )
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
A_ = betas_for_alpha_bar(UpperCAmelCase )
A_ = 1.0 - self.betas
A_ = torch.cumprod(self.alphas , dim=0 )
A_ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
A_ = 1.0
# setable values
A_ = None
A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() )
A_ = variance_type
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ):
return sample
def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ):
A_ = num_inference_steps
A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase )
def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ):
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
A_ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
A_ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) )
A_ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
A_ = variance.log()
A_ = beta.log()
A_ = (predicted_variance + 1) / 2
A_ = frac * max_log + (1 - frac) * min_log
return variance
def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ):
A_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 )
else:
A_ = None
# 1. compute alphas, betas
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
A_ = self.alphas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
A_ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
A_ = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
A_ = torch.clamp(
UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A_ = 0
if t > 0:
A_ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device )
A_ = self._get_variance(
UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
A_ = variance
elif self.variance_type == "learned_range":
A_ = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
" for the UnCLIPScheduler." )
A_ = variance * variance_noise
A_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
A_ = timesteps.to(original_samples.device )
A_ = alphas_cumprod[timesteps] ** 0.5
A_ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_alpha_prod.unsqueeze(-1 )
A_ = (1 - alphas_cumprod[timesteps]) ** 0.5
A_ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
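# ---------------------------------------------------------------------------
# Self-contained sketch (standalone re-derivation, not the scheduler API) of
# the squaredcos_cap_v2 beta schedule built by the helper above: each beta is
# 1 - alpha_bar(t2)/alpha_bar(t1) under the cosine alpha_bar, capped at 0.999.
# ---------------------------------------------------------------------------
import math

def demo_cosine_betas(num_steps: int, max_beta: float = 0.999) -> list:
    alpha_bar = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]

betas = demo_cosine_betas(10)
assert len(betas) == 10 and all(0 < b <= 0.999 for b in betas)
assert betas[-1] > betas[0]  # betas grow toward the end of the schedule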
| 329 | 0 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Optional[int] , UpperCAmelCase : str = "▁" , UpperCAmelCase : bool = True , UpperCAmelCase : Union[str, AddedToken] = "<unk>" , UpperCAmelCase : Union[str, AddedToken] = "</s>" , UpperCAmelCase : Union[str, AddedToken] = "<pad>" , ):
A_ = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
A_ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
A_ = token_dict["token"]
A_ = Tokenizer(Unigram() )
A_ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}" ) , " " ),
normalizers.Lowercase(),
] )
A_ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=UpperCAmelCase , add_prefix_space=UpperCAmelCase ),
pre_tokenizers.Digits(individual_digits=UpperCAmelCase ),
pre_tokenizers.Punctuation(),
] )
A_ = decoders.Metaspace(replacement=UpperCAmelCase , add_prefix_space=UpperCAmelCase )
A_ = TemplateProcessing(
single=f'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
A_ = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : Union[str, List[str]] , UpperCAmelCase : int = 8000 , UpperCAmelCase : bool = True , ):
A_ = trainers.UnigramTrainer(
vocab_size=UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=UpperCAmelCase , )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = [files]
self._tokenizer.train(UpperCAmelCase , trainer=UpperCAmelCase )
self.add_unk_id()
def __A ( self : Tuple , UpperCAmelCase : Union[Iterator[str], Iterator[Iterator[str]]] , UpperCAmelCase : int = 8000 , UpperCAmelCase : bool = True , ):
A_ = trainers.UnigramTrainer(
vocab_size=UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=UpperCAmelCase , )
self._tokenizer.train_from_iterator(UpperCAmelCase , trainer=UpperCAmelCase )
self.add_unk_id()
def __A ( self : Tuple ):
A_ = json.loads(self._tokenizer.to_str() )
A_ = self.special_tokens["unk"]["id"]
A_ = Tokenizer.from_str(json.dumps(UpperCAmelCase ) )
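# ---------------------------------------------------------------------------
# Usage sketch (toy two-sentence corpus assumed) showing the raw `tokenizers`
# primitives the wrapper class above builds on; a real run would use a proper
# corpus plus the wrapper's normalizers and pre-tokenizers.
# ---------------------------------------------------------------------------
from tokenizers import Tokenizer
from tokenizers.models import Unigram
from tokenizers.trainers import UnigramTrainer

demo_tokenizer = Tokenizer(Unigram())
demo_trainer = UnigramTrainer(
    vocab_size=100, special_tokens=["<pad>", "</s>", "<unk>"], show_progress=False
)
demo_tokenizer.train_from_iterator(["hello world", "hello tokenizers"], trainer=demo_trainer)
print(demo_tokenizer.encode("hello world").tokens)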
| 370 |
from math import isqrt, loga
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = [True] * max_number
for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 ,__UpperCamelCase ,__UpperCamelCase ):
A_ = False
return [i for i in range(2 ,__UpperCamelCase ) if is_prime[i]]
def __snake_case ( __UpperCamelCase : int = 80_0800 ,__UpperCamelCase : int = 80_0800 ):
"""simple docstring"""
A_ = degree * loga(__UpperCamelCase )
A_ = int(__UpperCamelCase )
A_ = calculate_prime_numbers(__UpperCamelCase )
A_ = 0
A_ = 0
A_ = len(__UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
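# ---------------------------------------------------------------------------
# Self-contained worked example (small numbers only) of the log-space test the
# two-pointer loop above relies on: p**q * q**p <= bound holds exactly when
# q*log2(p) + p*log2(q) <= log2(bound), which avoids huge intermediate powers.
# ---------------------------------------------------------------------------
from math import log2

p, q, bound = 2, 3, 800
direct = (p**q) * (q**p) <= bound                      # 8 * 9 = 72 <= 800
via_logs = q * log2(p) + p * log2(q) <= log2(bound)    # same comparison, overflow-safe
assert direct == via_logs == True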
| 329 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple=7 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Union[str, Any]=30 , UpperCAmelCase : str=400 , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=None , UpperCAmelCase : Dict=0.9 , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=[0.5, 0.5, 0.5] , UpperCAmelCase : Any=[0.5, 0.5, 0.5] , ):
A_ = size if size is not None else {"shortest_edge": 30}
A_ = crop_size if crop_size is not None else {"height": 30, "width": 30}
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = min_resolution
A_ = max_resolution
A_ = do_resize_and_center_crop
A_ = size
A_ = crop_pct
A_ = crop_size
A_ = do_normalize
A_ = image_mean
A_ = image_std
def __A ( self : Dict ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : str = PoolFormerImageProcessor if is_vision_available() else None
def __A ( self : Any ):
A_ = PoolFormerImageProcessingTester(self )
@property
def __A ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : List[str] ):
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , "do_resize_and_center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase , "crop_pct" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase , "image_std" ) )
def __A ( self : Dict ):
A_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 30} )
self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} )
A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __A ( self : List[Any] ):
pass
def __A ( self : Tuple ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __A ( self : Any ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __A ( self : str ):
# Initialize image_processing
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 371 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__a :str = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = RobertaPreLayerNormConfig.from_pretrained(
__UpperCamelCase ,architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A_ = torch.load(hf_hub_download(repo_id=__UpperCamelCase ,filename="pytorch_model.bin" ) )
A_ = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("roberta." ):
A_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A_ = tensor_value
A_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__UpperCamelCase ,config=__UpperCamelCase ,state_dict=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
# convert tokenizer
A_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 329 | 0 |
import string
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
A_ = ""
for symbol in message:
if symbol in string.ascii_uppercase:
A_ = string.ascii_uppercase.find(__UpperCamelCase )
A_ = num - key
if num < 0:
A_ = num + len(string.ascii_uppercase )
A_ = translated + string.ascii_uppercase[num]
else:
A_ = translated + symbol
print(f'''Decryption using Key #{key}: {translated}''' )
def __snake_case ( ):
"""simple docstring"""
A_ = input("Encrypted message: " )
A_ = message.upper()
decrypt(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
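# ---------------------------------------------------------------------------
# Worked example (hypothetical ciphertext) of one iteration of the brute-force
# loop above: "KHOOR" was produced with key 3, so shifting each letter back by
# 3 positions (wrapping around the alphabet) recovers "HELLO".
# ---------------------------------------------------------------------------
import string

ciphertext, key = "KHOOR", 3
plain = "".join(
    string.ascii_uppercase[(string.ascii_uppercase.find(c) - key) % 26] for c in ciphertext
)
assert plain == "HELLO"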
| 350 |
from maths.prime_factors import prime_factors
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = f'''Input value of [number={number}] must be an integer'''
raise TypeError(__UpperCamelCase )
if number < 1:
raise ValueError("Input must be a positive integer" )
return -1 if len(prime_factors(__UpperCamelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
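# ---------------------------------------------------------------------------
# Self-contained worked example: the function above computes the Liouville
# function lambda(n) = (-1)**Omega(n), where Omega counts prime factors with
# multiplicity. Re-derived here from scratch for a few small n.
# ---------------------------------------------------------------------------
def demo_liouville(n: int) -> int:
    count, d = 0, 2
    while d * d <= n:
        while n % d == 0:
            n //= d
            count += 1
        d += 1
    if n > 1:
        count += 1
    return -1 if count % 2 else 1

assert [demo_liouville(n) for n in [1, 2, 4, 12]] == [1, -1, 1, -1]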
| 329 | 0 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__a :int = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 351 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 329 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a :Optional[Any] = logging.get_logger(__name__)
__a :Optional[Any] = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'data2vec-text'
def __init__( self : Optional[int] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[Any]=768 , UpperCAmelCase : Optional[int]=12 , UpperCAmelCase : Tuple=12 , UpperCAmelCase : Tuple=3072 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Tuple=512 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : List[Any]=1E-12 , UpperCAmelCase : Tuple=1 , UpperCAmelCase : Any=0 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : int="absolute" , UpperCAmelCase : str=True , UpperCAmelCase : Optional[Any]=None , **UpperCAmelCase : Optional[int] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = hidden_act
A_ = intermediate_size
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = initializer_range
A_ = layer_norm_eps
A_ = position_embedding_type
A_ = use_cache
A_ = classifier_dropout
class _a ( snake_case_ ):
"""simple docstring"""
@property
def __A ( self : Dict ):
if self.task == "multiple-choice":
A_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
A_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 352 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a :Union[str, Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[int] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 329 | 0 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__a :Tuple = logging.get_logger(__name__)
class _a ( enum.Enum ):
"""simple docstring"""
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Optional[int] = 1
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 'generated'
def __init__( self : List[Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : str ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __A ( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : Any , ):
A_ = {}
if truncation is not None:
A_ = truncation
A_ = generate_kwargs
A_ = {}
if return_tensors is not None and return_type is None:
A_ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
A_ = return_type
if clean_up_tokenization_spaces is not None:
A_ = clean_up_tokenization_spaces
if stop_sequence is not None:
A_ = self.tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
if len(UpperCAmelCase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
A_ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __A ( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
return True
def __A ( self : Dict , *UpperCAmelCase : Tuple , UpperCAmelCase : List[str] ):
A_ = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , UpperCAmelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
A_ = ([prefix + arg for arg in args[0]],)
A_ = True
elif isinstance(args[0] , UpperCAmelCase ):
A_ = (prefix + args[0],)
A_ = False
else:
raise ValueError(
f''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
A_ = self.tokenizer(*UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
A_ = super().__call__(*UpperCAmelCase , **UpperCAmelCase )
if (
isinstance(args[0] , UpperCAmelCase )
and all(isinstance(UpperCAmelCase , UpperCAmelCase ) for el in args[0] )
and all(len(UpperCAmelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __A ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCAmelCase : str ):
A_ = self._parse_and_tokenize(UpperCAmelCase , truncation=UpperCAmelCase , **UpperCAmelCase )
return inputs
def __A ( self : Dict , UpperCAmelCase : str , **UpperCAmelCase : List[Any] ):
if self.framework == "pt":
A_ , A_ = model_inputs["input_ids"].shape
elif self.framework == "tf":
A_ , A_ = tf.shape(model_inputs["input_ids"] ).numpy()
A_ = generate_kwargs.get("min_length" , self.model.config.min_length )
A_ = generate_kwargs.get("max_length" , self.model.config.max_length )
self.check_inputs(UpperCAmelCase , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
A_ = self.model.generate(**UpperCAmelCase , **UpperCAmelCase )
A_ = output_ids.shape[0]
if self.framework == "pt":
A_ = output_ids.reshape(UpperCAmelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
A_ = tf.reshape(UpperCAmelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int]=ReturnType.TEXT , UpperCAmelCase : Tuple=False ):
A_ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
A_ = {f'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
A_ = {
f'''{self.return_name}_text''': self.tokenizer.decode(
UpperCAmelCase , skip_special_tokens=UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase , )
}
records.append(UpperCAmelCase )
return records
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = 'summary'
def __call__( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : List[str] ):
return super().__call__(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
if max_length < min_length:
            logger.warning(f'''Your min_length={min_length} must be smaller than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = 'translation'
def __A ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def __A ( self : str , *UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=TruncationStrategy.DO_NOT_TRUNCATE , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Dict=None ):
if getattr(self.tokenizer , "_build_translation_inputs" , UpperCAmelCase ):
return self.tokenizer._build_translation_inputs(
*UpperCAmelCase , return_tensors=self.framework , truncation=UpperCAmelCase , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase )
else:
return super()._parse_and_tokenize(*UpperCAmelCase , truncation=UpperCAmelCase )
def __A ( self : Optional[int] , UpperCAmelCase : str=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ):
A_ , A_ , A_ = super()._sanitize_parameters(**UpperCAmelCase )
if src_lang is not None:
A_ = src_lang
if tgt_lang is not None:
A_ = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
A_ = kwargs.get("task" , self.task )
A_ = task.split("_" )
if task and len(UpperCAmelCase ) == 4:
# translation, XX, to YY
A_ = items[1]
A_ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *UpperCAmelCase : Any , **UpperCAmelCase : Any ):
return super().__call__(*UpperCAmelCase , **UpperCAmelCase )
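# ---------------------------------------------------------------------------
# Usage sketch for the TranslationPipeline above. The "translation_en_to_fr"
# task string is split by _sanitize_parameters into src_lang/tgt_lang; the
# checkpoint name is an assumption and running this downloads its weights.
# ---------------------------------------------------------------------------
from transformers import pipeline

translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("How old are you?"))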
| 353 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ):
return False
return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule )
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ):
"""simple docstring"""
A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
A_ = is_compiled_module(__UpperCamelCase )
if is_compiled:
A_ = model
A_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = model.module
if not keep_fpaa_wrapper:
A_ = getattr(__UpperCamelCase ,"forward" )
A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase )
if original_forward is not None:
while hasattr(__UpperCamelCase ,"__wrapped__" ):
A_ = forward.__wrapped__
if forward == original_forward:
break
A_ = forward
if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ):
convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase )
if is_compiled:
A_ = model
A_ = compiled_model
return model
def __snake_case ( ):
"""simple docstring"""
PartialState().wait_for_everyone()
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__UpperCamelCase ,__UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(__UpperCamelCase ,__UpperCamelCase )
@contextmanager
def __snake_case ( **__UpperCamelCase : Any ):
"""simple docstring"""
for key, value in kwargs.items():
A_ = str(__UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __snake_case ( __UpperCamelCase : Optional[Any] ):
"""simple docstring"""
if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ):
A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase )
if hasattr(__UpperCamelCase ,"__qualname__" ):
return obj.__qualname__
if hasattr(__UpperCamelCase ,"__name__" ):
return obj.__name__
return str(__UpperCamelCase )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
for key, value in source.items():
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = destination.setdefault(__UpperCamelCase ,{} )
merge_dicts(__UpperCamelCase ,__UpperCamelCase )
else:
A_ = value
return destination
def __snake_case ( __UpperCamelCase : int = None ):
"""simple docstring"""
if port is None:
A_ = 2_9500
with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
| 329 | 0 |
def __snake_case ( __UpperCamelCase : list ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
if len(__UpperCamelCase ) == 0:
raise ValueError("Input list must be a non empty list" )
if len(__UpperCamelCase ) == 1:
return True
A_ = series[1] - series[0]
for index in range(len(__UpperCamelCase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def __snake_case ( __UpperCamelCase : list ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
if len(__UpperCamelCase ) == 0:
raise ValueError("Input list must be a non empty list" )
A_ = 0
for val in series:
answer += val
return answer / len(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
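# ---------------------------------------------------------------------------
# Self-contained worked example of the two checks above: an arithmetic series
# has a single common difference, and its mean is the sum divided by the
# length. The sample series is an assumption for illustration.
# ---------------------------------------------------------------------------
series = [2, 4, 6, 8]
diffs = {series[i + 1] - series[i] for i in range(len(series) - 1)}
assert len(diffs) == 1                   # constant difference of 2 -> arithmetic
assert sum(series) / len(series) == 5.0  # mean of the series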
| 354 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ):
A_ = tempfile.mkdtemp()
A_ = BlipImageProcessor()
A_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
A_ = BlipProcessor(UpperCAmelCase , UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer
def __A ( self : Optional[Any] , **UpperCAmelCase : int ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def __A ( self : Any ):
shutil.rmtree(self.tmpdirname )
    def __A ( self : Dict ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def __A ( self : Any ):
        processor = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
    def __A ( self : Dict ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def __A ( self : int ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def __A ( self : Tuple ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def __A ( self : Any ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def __A ( self : Optional[Any] ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a :Union[str, Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[int] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 355 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def __snake_case ( colours_picked : int = 20 ):
    """simple docstring"""
    total = math.comb(NUM_BALLS ,colours_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,colours_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'''{result:.9f}'''
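# Derivation sketch: by linearity of expectation the expected number of
# distinct colours is NUM_COLOURS * P(a given colour appears), and a colour is
# absent exactly when all `colours_picked` balls come from the other
# NUM_BALLS - BALLS_PER_COLOUR balls, hence the comb(60, 20) / comb(70, 20)
# ratio above with the default constants.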
if __name__ == "__main__":
print(solution(20))
| 329 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING: Optional[dict] = None
class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
    def __init__( self : Tuple , features=None , device=None , **jnp_array_kwargs ):
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                f'''Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` '''
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`." )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f'''Device with string identifier {self.device} not listed among the available '''
                f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
                f'''device: {str(jax.devices()[0] )}.''' )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str( ):
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self : Union[str, Any] , column ):
        import jax
        import jax.numpy as jnp
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize( self : Any , value ):
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize( self : str , data_struct ):
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , "__array__" ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self : Tuple , data_struct : dict ):
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self : int , pa_table : pa.Table ):
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self : Any , pa_table : pa.Table ):
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self : Tuple , pa_table : pa.Table ):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
| 356 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__a :Optional[Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _a ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self : Optional[int] , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self : List[str] , token_ids_0 : Any , token_ids_1 : Dict = None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self : Optional[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self : Optional[Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 329 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class _a ( unittest.TestCase ):
"""simple docstring"""
    def __A ( self : Union[str, Any] ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )
        processor.save_pretrained(self.tmpdirname )
def __A ( self : Union[str, Any] , **UpperCAmelCase : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def __A ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
    def __A ( self : int ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def __A ( self : Tuple ):
        processor = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , SamImageProcessor )
    def __A ( self : Dict ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        input_feat_extract.pop("original_sizes" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
    def __A ( self : Optional[Any] ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = [torch.ones((1, 3, 5, 5) )]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks , original_sizes , reshaped_input_size )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        masks = processor.post_process_masks(
            dummy_masks , torch.tensor(original_sizes ) , torch.tensor(reshaped_input_size ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5) )]
        masks = processor.post_process_masks(dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError ):
            masks = processor.post_process_masks(dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) )
@require_vision
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
    def __A ( self : Optional[int] ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )
        processor.save_pretrained(self.tmpdirname )
def __A ( self : Optional[Any] , **UpperCAmelCase : Optional[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def __A ( self : str ):
shutil.rmtree(self.tmpdirname )
    def __A ( self : Optional[Any] ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def __A ( self : str ):
        processor = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , SamImageProcessor )
    def __A ( self : Optional[int] ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        input_feat_extract.pop("original_sizes" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
    def __A ( self : Tuple ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = [tf.ones((1, 3, 5, 5) )]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks , original_sizes , reshaped_input_size , return_tensors="tf" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        masks = processor.post_process_masks(
            dummy_masks , tf.convert_to_tensor(original_sizes ) , tf.convert_to_tensor(reshaped_input_size ) , return_tensors="tf" , )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5) )]
        masks = processor.post_process_masks(
            dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) , return_tensors="tf" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            masks = processor.post_process_masks(
                dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) , return_tensors="tf" )
@require_vision
@require_torchvision
class _a ( unittest.TestCase ):
"""simple docstring"""
    def __A ( self : Optional[Any] ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )
        processor.save_pretrained(self.tmpdirname )
def __A ( self : int , **UpperCAmelCase : int ):
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor
def __A ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
    def __A ( self : Union[str, Any] ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
@is_pt_tf_cross_test
    def __A ( self : str ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_mask = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
        tf_dummy_masks = [tf.convert_to_tensor(dummy_mask )]
        pt_dummy_masks = [torch.tensor(dummy_mask )]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks , original_sizes , reshaped_input_size , return_tensors="tf" )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks , original_sizes , reshaped_input_size , return_tensors="pt" )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
    def __A ( self : Tuple ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input , return_tensors="pt" )["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input , return_tensors="pt" )["pixel_values"].numpy()
        tf_input_feat_extract = image_processor(image_input , return_tensors="tf" )["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input , return_tensors="tf" )["pixel_values"].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract , pt_input_processor ) )
        self.assertTrue(np.allclose(pt_input_feat_extract , tf_input_feat_extract ) )
        self.assertTrue(np.allclose(tf_input_feat_extract , tf_input_processor ) )
| 357 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a :Optional[Any] = logging.get_logger(__name__)
class _a ( VideoMAEImageProcessor ):
    """simple docstring"""
    def __init__( self : List[str] , *args : int , **kwargs : Optional[int] ):
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 329 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset( key : List[str] ,offset : Any ,original_name : List[str] ,new_name : Optional[Any] ):
    """simple docstring"""
    to_find = original_name.split("." )[0]
    key_list = key.split("." )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' )
    return key
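# Hedged example of the renaming above, assuming the key layout of the
# original PoolFormer checkpoints:
# replace_key_with_offset("2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
# -> "block.1.0.output.conv1.weight"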
def rename_keys( state_dict : Any ):
    """simple docstring"""
    new_state_dict = OrderedDict()
    total_embed_found , patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network" ):
            key = key.replace("network" ,"poolformer.encoder" )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias" ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj" )]
            key = key.replace(to_replace ,f'''patch_embeddings.{total_embed_found}.''' )
            key = key.replace("proj" ,"projection" )
            if key.endswith("bias" ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key ,patch_emb_offset ,"mlp.fc1" ,"output.conv1" )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key ,patch_emb_offset ,"mlp.fc2" ,"output.conv2" )
        if "norm1" in key:
            key = replace_key_with_offset(key ,patch_emb_offset ,"norm1" ,"before_norm" )
        if "norm2" in key:
            key = replace_key_with_offset(key ,patch_emb_offset ,"norm2" ,"after_norm" )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key ,patch_emb_offset ,"layer_scale_1" ,"layer_scale_1" )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key ,patch_emb_offset ,"layer_scale_2" ,"layer_scale_2" )
        if "head" in key:
            key = key.replace("head" ,"classifier" )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img( ):
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url ,stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint( model_name : Union[str, Any] ,checkpoint_path : List[str] ,pytorch_dump_folder_path : List[Any] ):
    """simple docstring"""
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="dataset" ) ,"r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "s12":
A_ = [2, 2, 6, 2]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s24":
A_ = [4, 4, 12, 4]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s36":
A_ = [6, 6, 18, 6]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 1E-6
A_ = 0.9
elif size == "m36":
A_ = [6, 6, 18, 6]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
elif size == "m48":
A_ = [8, 8, 24, 8]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image ,return_tensors="pt" ).pixel_values
    logger.info(f'''Converting model {model_name}...''' )
    # load original state dict
    state_dict = torch.load(checkpoint_path ,map_location=torch.device("cpu" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
A_ = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
A_ = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
A_ = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
A_ = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] ,expected_slice ,atol=1E-2 )
    # finally, save model and image processor
    logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__a :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__a :int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 358 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _a :
"""simple docstring"""
@staticmethod
        def __A ( *args : Union[str, Any] , **kwargs : Union[str, Any] ):
pass
@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __A ( self : List[str] ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(UpperCAmelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@require_tf
def __A ( self : int ):
A_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
[
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
{"score": 0.333, "label": ANY(UpperCAmelCase )},
],
] , )
@slow
@require_torch
def __A ( self : Any ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def __A ( self : Optional[Any] ):
A_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
| 329 | 0 |
from functools import lru_cache
@lru_cache
def __snake_case ( __UpperCamelCase : int ):
if num < 0:
raise ValueError("Number should not be negative." )
    return 1 if num in (0, 1) else num * __snake_case(num - 1 )
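# Spot checks for the memoised factorial above (lru_cache makes repeated
# calls O(1) once a value has been computed):
assert __snake_case(5 ) == 120
assert __snake_case(0 ) == 1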
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler : Optional[Any] ,num_steps : Dict = 10 ):
    """simple docstring"""
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule( scheduler : Any ,num_steps : Tuple = 10 ):
    """simple docstring"""
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname ,"schedule.bin" )
                torch.save(scheduler.state_dict() ,file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
    def assertListAlmostEqual( self : Any , list1 : int , list2 : List[Any] , tol : List[str] ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
def __A ( self : List[Any] ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def __A ( self : Dict ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
    m = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
    num_steps = 1_0
    def assertListAlmostEqual( self : str , list1 : int , list2 : Any , tol : Tuple , msg : Dict = None ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
def __A ( self : List[Any] ):
A_ = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs , expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper:
    """simple docstring"""
    def __init__( self : List[str] , fn : List[str] ):
        self.fn = fn
    def __call__( self : Union[str, Any] , *args : str , **kwargs : Optional[Any] ):
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( cls : Dict , scheduler : List[str] ):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a :Tuple = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Tuple = ['MobileViTFeatureExtractor']
__a :int = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[Any] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :int = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 360 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum( x : Optional[int] ):  # picklable for multiprocessing
    """simple docstring"""
    return x.sum()
def add_one( i : List[str] ):  # picklable for multiprocessing
    """simple docstring"""
    return i + 1
@dataclass
class A:
    """simple docstring"""
    x: int
    y: str
class _a ( TestCase ):
"""simple docstring"""
def __A ( self : Dict ):
A_ = {}
A_ = []
A_ = 1
A_ = [1, 2]
A_ = {"a": 1, "b": 2}
A_ = {"a": [1, 2], "b": [3, 4]}
A_ = {"a": {"1": 1}, "b": 2}
A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
A_ = {}
A_ = []
A_ = 2
A_ = [2, 3]
A_ = {"a": 2, "b": 3}
A_ = {"a": [2, 3], "b": [4, 5]}
A_ = {"a": {"1": 2}, "b": 3}
A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
A_ = 2
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A_ = {"a": 2, "b": 0, "c": 2}
A_ = {
"a": np.eye(2 ).astype(UpperCAmelCase ),
"b": np.zeros(3 ).astype(UpperCAmelCase ),
"c": np.ones(2 ).astype(UpperCAmelCase ),
}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(AttributeError ):  # can't pickle a local lambda
            map_nested(lambda x : x + 1 , {"a": 1} , num_proc=2 )
def __A ( self : List[str] ):
A_ = {"a": 1, "b": 2}
A_ = {"a": 3, "b": 4}
A_ = {"a": 5, "b": 6}
A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase )
    def __A ( self : Any ):
        class Foo:
            """simple docstring"""
            my_attr = 'bar'
        foo = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(foo , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def __snake_case ( iterable_length : Optional[int] ,num_proc : Tuple ,expected_num_proc : List[Any] ):
    """simple docstring"""
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {f'''{i}''': i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 ,data_struct ,num_proc=num_proc ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _a ( TestCase ):
"""simple docstring"""
@require_tf
def __A ( self : Union[str, Any] ):
import tensorflow as tf
from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
def __A ( self : Optional[int] ):
import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
def __A ( self : Any ):
def gen_random_output():
return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize("input_data" ,[{}] )
def __snake_case ( input_data : str ):
    """simple docstring"""
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" ,[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] ,)
def __snake_case ( data : Dict ,expected_output : Any ):
    """simple docstring"""
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def __snake_case ( ):
"""simple docstring"""
A_ = A(x=1 ,y="foobar" )
A_ = {"x": 1, "y": "foobar"}
assert asdict(__UpperCamelCase ) == expected_output
A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]}
A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(__UpperCamelCase ) == expected_output
with pytest.raises(__UpperCamelCase ):
asdict([1, A(x=10 ,y="foo" )] )
def _split_text( text : str ):
    """simple docstring"""
    return text.split()
def _aseconds_generator_of_aitems_with_timing( content : List[Any] ):
    """simple docstring"""
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def __snake_case ( ):
"""simple docstring"""
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
# check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
# check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
| 329 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling( data : dict ):
    """simple docstring"""
    return (data["data"], data["target"])
def xgboost( features : np.ndarray ,target : np.ndarray ,test_features : np.ndarray ):
    """simple docstring"""
    xgb = XGBRegressor(verbosity=0 ,random_state=42 )
    xgb.fit(features ,target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) ,1 )
    return predictions
def main( ):
    """simple docstring"""
    housing = fetch_california_housing()
    data , target = data_handling(housing )
    x_train , x_test , y_train , y_test = train_test_split(
        data ,target ,test_size=0.25 ,random_state=1 )
    predictions = xgboost(x_train ,y_train ,x_test )
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test ,predictions )}''' )
    print(f'''Mean Square Error : {mean_squared_error(y_test ,predictions )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 361 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char( cp : List[Any] ):
"""simple docstring"""
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def is_chinese( word : str ):
    """simple docstring"""
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
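# Illustrative checks (comment sketch): is_chinese returns 1 only when every
# character of the word is a CJK character, e.g.
# is_chinese("中国") -> 1, while is_chinese("中a") -> 0.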
def get_chinese_word( tokens : List[str] ):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol( bert_tokens : List[str] ,chinese_word_set : set() ):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start ,max_word_len )
            for i in range(l ,1 ,-1 ):
                whole_word = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 ,start + i ):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
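# Hedged walk-through of the whole-word re-marking above: given WordPiece
# tokens and the LTP word set,
# add_sub_symbol(["中", "国", "人"], {"中国"}) -> ["中", "##国", "人"],
# so the two-character word can later be masked as a single unit.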
def prepare_ref( lines : List[str] ,ltp_tokenizer : LTP ,bert_tokenizer : BertTokenizer ):
    """simple docstring"""
    ltp_res = []
    for i in range(0 ,len(lines ) ,100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 ,len(lines ) ,100 ):
        res = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=True ,truncation=True ,max_length=512 )
        bert_res.extend(res["input_ids"] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res ,ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens ,chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main( args : Dict ):
    """simple docstring"""
    with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data ,ltp_tokenizer ,bert_tokenizer )
    with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
        data = [json.dumps(ref ) + "\n" for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
__a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
__a :Dict = parser.parse_args()
main(args)
| 329 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class _a ( snake_case_ ):
_lowerCamelCase : Dict = 'new-model'
if is_tf_available():
class _a ( snake_case_ ):
_lowerCamelCase : Any = NewModelConfig
@require_tf
class _a ( unittest.TestCase ):
@slow
def __A ( self : Dict ):
A_ = "bert-base-cased"
A_ = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
A_ = TFAutoModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def __A ( self : Optional[Any] ):
A_ = "bert-base-cased"
A_ = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
A_ = TFAutoModelForPreTraining.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def __A ( self : List[str] ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
A_ = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase )
A_ , A_ = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def __A ( self : str ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
A_ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def __A ( self : Dict ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
A_ = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase )
A_ , A_ = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def __A ( self : List[str] ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
A_ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase )
A_ , A_ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def __A ( self : str ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
A_ = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
A_ = TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
def __A ( self : Dict ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
A_ = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
A_ = TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
@slow
@require_tensorflow_probability
def __A ( self : int ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
A_ = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
A_ = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCAmelCase )
A_ , A_ = TFAutoModelForTableQuestionAnswering.from_pretrained(
UpperCAmelCase , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Dict ):
A_ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase ) , 14410 )
def __A ( self : Dict ):
A_ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase ) , 14410 )
def __A ( self : List[str] ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
A_ = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
A_ = copy.deepcopy(model.config )
A_ = ["FunnelBaseModel"]
A_ = TFAutoModel.from_config(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase )
A_ = TFAutoModel.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Any ):
try:
AutoConfig.register("new-model" , UpperCAmelCase )
A_ = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(UpperCAmelCase ):
auto_class.register(UpperCAmelCase , UpperCAmelCase )
auto_class.register(UpperCAmelCase , UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
auto_class.register(UpperCAmelCase , UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
A_ = BertModelTester(self ).get_config()
A_ = NewModelConfig(**tiny_config.to_dict() )
A_ = auto_class.from_config(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase )
A_ = auto_class.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __A ( self : str ):
with self.assertRaisesRegex(
UpperCAmelCase , "bert-base is not a local folder and is not a valid model identifier" ):
A_ = TFAutoModel.from_pretrained("bert-base" )
def __A ( self : List[str] ):
with self.assertRaisesRegex(
UpperCAmelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
A_ = TFAutoModel.from_pretrained(UpperCAmelCase , revision="aaaaaa" )
def __A ( self : int ):
with self.assertRaisesRegex(
UpperCAmelCase , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
A_ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def __A ( self : List[Any] ):
with self.assertRaisesRegex(UpperCAmelCase , "Use `from_pt=True` to load this model" ):
A_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def __A ( self : Tuple ):
# Make sure we have cached the model.
A_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
A_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
A_ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
A_ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 362 |
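The registration test above revolves around a config-class to model-class mapping; a plain-Python sketch of that registry pattern (illustrative only, not the transformers implementation):

class ToyConfig:
    model_type = "toy"

class ToyModel:
    config_class = ToyConfig
    def __init__(self, config):
        self.config = config

_REGISTRY = {}

def register(config_cls, model_cls):
    if model_cls.config_class is not config_cls:
        raise ValueError("model's config_class must match the registered config")
    _REGISTRY[config_cls] = model_cls

def from_config(config):
    return _REGISTRY[type(config)](config)

register(ToyConfig, ToyModel)
model = from_config(ToyConfig())
assert isinstance(model, ToyModel)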
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __snake_case ( __UpperCamelCase : Features ):
"""simple docstring"""
A_ = np.inf
def set_batch_size(__UpperCamelCase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary":
A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__UpperCamelCase ,__UpperCamelCase )
return None if batch_size is np.inf else batch_size
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ):
super().__init__(
UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , )
A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths}
A_ = _PACKAGED_DATASETS_MODULES["parquet"][1]
A_ = Parquet(
cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , )
def __A ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
A_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ = None
A_ = None
A_ = None
A_ = None
self.builder.download_and_prepare(
download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , )
A_ = self.builder.as_dataset(
split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class _a :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ):
A_ = dataset
A_ = path_or_buf
A_ = batch_size or get_writer_batch_size(dataset.features )
A_ = parquet_writer_kwargs
def __A ( self : int ):
A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
else:
A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
return written
def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ):
A_ = 0
A_ = parquet_writer_kwargs.pop("path_or_buf" , UpperCAmelCase )
A_ = self.dataset.features.arrow_schema
A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
A_ = query_table(
table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(UpperCAmelCase )
written += batch.nbytes
writer.close()
return written
| 329 | 0 |
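A standalone sketch of the batched ParquetWriter loop in _write above, using plain pyarrow so it runs without the datasets package; the file name is arbitrary:

import pyarrow as pa
import pyarrow.parquet as pq

table = pa.table({"text": [f"row {i}" for i in range(10)]})
writer = pq.ParquetWriter("example.parquet", schema=table.schema)
batch_size = 4
for offset in range(0, table.num_rows, batch_size):
    writer.write_table(table.slice(offset, batch_size))  # each slice is written as its own row group(s)
writer.close()
print(pq.read_table("example.parquet").num_rows)  # 10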
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
    if bit_count < 0:
        raise ValueError("The given input must be non-negative" )
# get the generated string sequence
A_ = gray_code_sequence_string(__UpperCamelCase )
    # convert the bit strings to integers
for i in range(len(__UpperCamelCase ) ):
A_ = int(sequence[i] ,2 )
return sequence
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
A_ = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
A_ = gray_code_sequence_string(bit_count - 1 )
A_ = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
A_ = "0" + smaller_sequence[i]
sequence.append(__UpperCamelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
A_ = "1" + smaller_sequence[i]
sequence.append(__UpperCamelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
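A quick property check for the sequence above: consecutive Gray codes differ in exactly one bit, and an n-bit sequence covers every value once. The closed form i ^ (i >> 1) is the standard way to get the same codes directly:

codes = [i ^ (i >> 1) for i in range(8)]  # 3-bit Gray code via the closed form
assert all(bin(a ^ b).count("1") == 1 for a, b in zip(codes, codes[1:]))
assert sorted(codes) == list(range(8))  # a permutation of 0..7
print(codes)  # [0, 1, 3, 2, 6, 7, 5, 4]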
from __future__ import annotations
def __snake_case ( __UpperCamelCase : int = 4 ):
"""simple docstring"""
A_ = abs(__UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )]
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_row(reverse_column(__UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
return reverse_column(transpose(__UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = matrix[::-1]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
A_ = [x[::-1] for x in matrix]
return matrix
def __snake_case ( __UpperCamelCase : list[list[int]] ):
"""simple docstring"""
for i in matrix:
print(*__UpperCamelCase )
if __name__ == "__main__":
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 90 counterclockwise:\n')
print_matrix(rotate_aa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 180:\n')
print_matrix(rotate_aaa(matrix))
__a :Any = make_matrix()
print('\norigin:\n')
print_matrix(matrix)
print('\nrotate 270 counterclockwise:\n')
print_matrix(rotate_aaa(matrix))
| 329 | 0 |
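A sanity check for the rotations above, written as a standalone sketch: transpose-then-reverse-rows is one 90-degree counterclockwise turn, and composing it gives the 180 and 270 cases.

def rot90_ccw(m):
    return [list(row) for row in zip(*m)][::-1]  # transpose, then reverse row order

m = [[1, 2], [3, 4]]
assert rot90_ccw(m) == [[2, 4], [1, 3]]                        # 90 degrees
assert rot90_ccw(rot90_ccw(m)) == [[4, 3], [2, 1]]             # 180 degrees
assert rot90_ccw(rot90_ccw(rot90_ccw(m))) == [[3, 1], [4, 2]]  # 270 degrees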
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Dict = VQModel
_lowerCamelCase : Union[str, Any] = 'sample'
@property
def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=(32, 32) ):
A_ = 4
A_ = 3
A_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
return {"sample": image}
@property
def __A ( self : List[str] ):
return (3, 32, 32)
@property
def __A ( self : List[str] ):
return (3, 32, 32)
def __A ( self : Dict ):
A_ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : Dict ):
pass
def __A ( self : List[Any] ):
pass
def __A ( self : Any ):
A_ , A_ = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase )
A_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __A ( self : Union[str, Any] ):
A_ = VQModel.from_pretrained("fusing/vqgan-dummy" )
model.to(UpperCAmelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
A_ = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
A_ = image.to(UpperCAmelCase )
with torch.no_grad():
A_ = model(UpperCAmelCase ).sample
A_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
A_ = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 ) )
| 364 |
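A short usage sketch for the tiny configuration exercised in the test above, assuming the diffusers VQModel constructor accepts these kwargs as shown:

import torch
from diffusers import VQModel

model = VQModel(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=3,
)
with torch.no_grad():
    out = model(torch.randn(1, 3, 32, 32)).sample  # reconstruction, same shape as the input
print(out.shape)  # torch.Size([1, 3, 32, 32])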
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Any = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 329 | 0 |
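Every class above follows the same guard pattern: any use raises unless the listed backends can be imported. A minimal stdlib sketch of that idea (toy_requires_backends is a hypothetical stand-in for the real helper):

import importlib.util

def toy_requires_backends(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backends: {missing}")

class ToyPipeline:
    def __init__(self):
        toy_requires_backends(self, ["torch", "transformers", "onnx"])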
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a :int = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ):
"""simple docstring"""
A_ = b.T
A_ = np.sum(np.square(__UpperCamelCase ) ,axis=1 )
A_ = np.sum(np.square(__UpperCamelCase ) ,axis=0 )
A_ = np.matmul(__UpperCamelCase ,__UpperCamelCase )
A_ = aa[:, None] - 2 * ab + ba[None, :]
return d
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = x.reshape(-1 ,3 )
A_ = squared_euclidean_distance(__UpperCamelCase ,__UpperCamelCase )
return np.argmin(__UpperCamelCase ,axis=1 )
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = ['pixel_values']
def __init__( self : Dict , UpperCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , **UpperCAmelCase : int , ):
super().__init__(**UpperCAmelCase )
A_ = size if size is not None else {"height": 256, "width": 256}
A_ = get_size_dict(UpperCAmelCase )
A_ = np.array(UpperCAmelCase ) if clusters is not None else None
A_ = do_resize
A_ = size
A_ = resample
A_ = do_normalize
A_ = do_color_quantize
def __A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[Any] , ):
A_ = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
UpperCAmelCase , size=(size["height"], size["width"]) , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , ):
A_ = rescale(image=UpperCAmelCase , scale=1 / 127.5 , data_format=UpperCAmelCase )
A_ = image - 1
return image
def __A ( self : List[str] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **UpperCAmelCase : Optional[int] , ):
A_ = do_resize if do_resize is not None else self.do_resize
A_ = size if size is not None else self.size
A_ = get_size_dict(UpperCAmelCase )
A_ = resample if resample is not None else self.resample
A_ = do_normalize if do_normalize is not None else self.do_normalize
A_ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
A_ = clusters if clusters is not None else self.clusters
A_ = np.array(UpperCAmelCase )
A_ = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
A_ = [to_numpy_array(UpperCAmelCase ) for image in images]
if do_resize:
A_ = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_normalize:
A_ = [self.normalize(image=UpperCAmelCase ) for image in images]
if do_color_quantize:
A_ = [to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
A_ = np.array(UpperCAmelCase )
A_ = color_quantize(UpperCAmelCase , UpperCAmelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
A_ = images.shape[0]
A_ = images.reshape(UpperCAmelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
A_ = list(UpperCAmelCase )
else:
A_ = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
A_ = {"input_ids": images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
| 365 |
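The two module-level helpers at the top of this file implement nearest-cluster color quantization; a tiny numpy demo of the same distance-then-argmin computation:

import numpy as np

clusters = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.float64)  # toy 2-color palette
pixels = np.array([[10, 10, 10], [250, 240, 245]], dtype=np.float64)

d = (
    np.sum(pixels**2, axis=1)[:, None]
    - 2 * pixels @ clusters.T
    + np.sum(clusters**2, axis=1)[None, :]
)  # squared euclidean distances, shape (num_pixels, num_clusters)
print(np.argmin(d, axis=1))  # [0 1] -> dark pixel maps to black, light pixel to white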
import itertools
import math
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __snake_case ( ):
"""simple docstring"""
A_ = 2
while True:
if is_prime(__UpperCamelCase ):
yield num
num += 1
def __snake_case ( __UpperCamelCase : int = 1_0001 ):
"""simple docstring"""
return next(itertools.islice(prime_generator() ,nth - 1 ,__UpperCamelCase ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
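A quick cross-check of the 6k +/- 1 trial division above against brute force for small n (stdlib only; rewritten here with a while loop instead of range for clarity):

def is_prime_naive(n):
    return n > 1 and all(n % d for d in range(2, n))

def is_prime_6k(n):
    if 1 < n < 4:
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True

assert all(is_prime_6k(n) == is_prime_naive(n) for n in range(200))
print([n for n in range(30) if is_prime_6k(n)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]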
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = ''
_lowerCamelCase : Tuple = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : int , UpperCAmelCase : Optional[DatasetInfo] = None , UpperCAmelCase : Optional[str] = None , **UpperCAmelCase : Union[str, Any] , ):
super().__init__(self , **UpperCAmelCase )
A_ = repo_info
A_ = token
A_ = None
def __A ( self : Optional[Any] ):
if self.dir_cache is None:
A_ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
A_ = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(UpperCAmelCase ): {"name": str(UpperCAmelCase ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __A ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str = "rb" , **UpperCAmelCase : str , ):
if not isinstance(self.repo_info , UpperCAmelCase ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
A_ = hf_hub_url(self.repo_info.id , UpperCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
UpperCAmelCase , mode=UpperCAmelCase , headers=get_authentication_headers_for_url(UpperCAmelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def __A ( self : Tuple , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ):
self._get_dirs()
A_ = self._strip_protocol(UpperCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(UpperCAmelCase )
def __A ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : int=False , **UpperCAmelCase : Optional[Any] ):
self._get_dirs()
A_ = PurePosixPath(path.strip("/" ) )
A_ = {}
for p, f in self.dir_cache.items():
A_ = PurePosixPath(p.strip("/" ) )
A_ = p.parent
if root == path:
A_ = f
A_ = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 366 |
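The filesystem above materializes the repo listing into dir_cache and answers ls from it; a dict-backed sketch of just that lookup, with fsspec left out:

from pathlib import PurePosixPath

dir_cache = {
    "data": {"name": "data", "type": "directory", "size": None},
    "data/train.txt": {"name": "data/train.txt", "type": "file", "size": None},
    "README.md": {"name": "README.md", "type": "file", "size": None},
}

def toy_ls(path, detail=False):
    root = PurePosixPath(path.strip("/"))
    out = [f for p, f in dir_cache.items() if PurePosixPath(p).parent == root]
    return out if detail else sorted(f["name"] for f in out)

print(toy_ls(""))      # ['README.md', 'data']
print(toy_ls("data"))  # ['data/train.txt']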
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _a :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ):
A_ = parent
A_ = 13
A_ = 7
A_ = True
A_ = True
A_ = True
A_ = True
A_ = 99
A_ = 384
A_ = 2
A_ = 4
A_ = 37
A_ = "gelu"
A_ = 0.1
A_ = 0.1
A_ = 512
A_ = 16
A_ = 2
A_ = 0.02
A_ = 3
A_ = 4
A_ = 128
A_ = 2
A_ = 9
A_ = 1
A_ = None
def __A ( self : Optional[int] ):
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_input_mask:
A_ = random_attention_mask([self.batch_size, self.seq_length] )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
A_ = TFConvBertModel(config=UpperCAmelCase )
A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
A_ = [input_ids, input_mask]
A_ = model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ):
A_ = TFConvBertForMaskedLM(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ):
A_ = self.num_labels
A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ):
A_ = self.num_choices
A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
A_ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ):
A_ = self.num_labels
A_ = TFConvBertForTokenClassification(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase )
A_ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
A_ = model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : List[str] ):
A_ = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ = config_and_inputs
A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _a ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Any = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Dict = False
def __A ( self : List[str] ):
A_ = TFConvBertModelTester(self )
A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def __A ( self : Tuple ):
self.config_tester.run_common_tests()
def __A ( self : Tuple ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def __A ( self : Dict ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def __A ( self : int ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def __A ( self : List[Any] ):
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def __A ( self : str ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = True
if hasattr(UpperCAmelCase , "use_cache" ):
A_ = True
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
for model_class in self.all_model_classes:
A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
A_ = model_class(UpperCAmelCase )
A_ = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" )
A_ = tf.keras.models.load_model(UpperCAmelCase )
A_ = model(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = outputs["encoder_hidden_states"]
A_ = outputs["encoder_attentions"]
else:
A_ = outputs["hidden_states"]
A_ = outputs["attentions"]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
A_ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __A ( self : List[str] ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(UpperCAmelCase )
def __A ( self : Any ):
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
A_ = True
A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ):
A_ = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
A_ = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ):
A_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A_ = True
A_ = False
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
A_ = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
A_ = True
A_ = True
A_ = model_class(UpperCAmelCase )
A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self : Dict ):
A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
A_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ = model(UpperCAmelCase )[0]
A_ = [1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
A_ = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
| 329 | 0 |
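The tester above feeds random token-id batches into each head; a minimal TensorFlow equivalent of that input construction (batch/sequence/vocab sizes copied from the tester, names illustrative):

import tensorflow as tf

batch_size, seq_length, vocab_size = 13, 7, 99
input_ids = tf.random.uniform((batch_size, seq_length), maxval=vocab_size, dtype=tf.int32)
attention_mask = tf.ones((batch_size, seq_length), dtype=tf.int32)
print(input_ids.shape)  # (13, 7)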
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[str]=0.0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : str = "geglu" , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : str = "layer_norm" , UpperCAmelCase : bool = False , ):
super().__init__()
A_ = only_cross_attention
A_ = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
A_ = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A_ = AdaLayerNorm(UpperCAmelCase , UpperCAmelCase )
elif self.use_ada_layer_norm_zero:
A_ = AdaLayerNormZero(UpperCAmelCase , UpperCAmelCase )
else:
A_ = nn.LayerNorm(UpperCAmelCase , elementwise_affine=UpperCAmelCase )
A_ = Attention(
query_dim=UpperCAmelCase , heads=UpperCAmelCase , dim_head=UpperCAmelCase , dropout=UpperCAmelCase , bias=UpperCAmelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCAmelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self-attention, where there is only one attention block.
            # I.e. the number of modulation chunks returned by AdaLayerNormZero would not make sense if it were
            # also returned during the second (cross-attention) block.
A_ = (
AdaLayerNorm(UpperCAmelCase , UpperCAmelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCAmelCase , elementwise_affine=UpperCAmelCase )
)
A_ = Attention(
query_dim=UpperCAmelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCAmelCase , dim_head=UpperCAmelCase , dropout=UpperCAmelCase , bias=UpperCAmelCase , upcast_attention=UpperCAmelCase , ) # is self-attn if encoder_hidden_states is none
else:
A_ = None
A_ = None
# 3. Feed-forward
A_ = nn.LayerNorm(UpperCAmelCase , elementwise_affine=UpperCAmelCase )
A_ = FeedForward(UpperCAmelCase , dropout=UpperCAmelCase , activation_fn=UpperCAmelCase , final_dropout=UpperCAmelCase )
# let chunk size default to None
A_ = None
A_ = 0
def __A ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ):
        # Sets the chunk size used for feed-forward chunking
A_ = chunk_size
A_ = dim
def __A ( self : Any , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[torch.LongTensor] = None , UpperCAmelCase : Dict[str, Any] = None , UpperCAmelCase : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A_ = self.norma(UpperCAmelCase , UpperCAmelCase )
elif self.use_ada_layer_norm_zero:
A_ , A_ , A_ , A_ , A_ = self.norma(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hidden_dtype=hidden_states.dtype )
else:
A_ = self.norma(UpperCAmelCase )
A_ = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A_ = self.attna(
UpperCAmelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCAmelCase , **UpperCAmelCase , )
if self.use_ada_layer_norm_zero:
A_ = gate_msa.unsqueeze(1 ) * attn_output
A_ = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A_ = (
self.norma(UpperCAmelCase , UpperCAmelCase ) if self.use_ada_layer_norm else self.norma(UpperCAmelCase )
)
A_ = self.attna(
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase , )
A_ = attn_output + hidden_states
# 3. Feed-forward
A_ = self.norma(UpperCAmelCase )
if self.use_ada_layer_norm_zero:
A_ = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A_ = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A_ = torch.cat(
[self.ff(UpperCAmelCase ) for hid_slice in norm_hidden_states.chunk(UpperCAmelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A_ = self.ff(UpperCAmelCase )
if self.use_ada_layer_norm_zero:
A_ = gate_mlp.unsqueeze(1 ) * ff_output
A_ = ff_output + hidden_states
return hidden_states
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 4 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : str = "geglu" , UpperCAmelCase : bool = False , ):
super().__init__()
A_ = int(dim * mult )
A_ = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A_ = GELU(UpperCAmelCase , UpperCAmelCase )
if activation_fn == "gelu-approximate":
A_ = GELU(UpperCAmelCase , UpperCAmelCase , approximate="tanh" )
elif activation_fn == "geglu":
A_ = GEGLU(UpperCAmelCase , UpperCAmelCase )
elif activation_fn == "geglu-approximate":
A_ = ApproximateGELU(UpperCAmelCase , UpperCAmelCase )
A_ = nn.ModuleList([] )
# project in
self.net.append(UpperCAmelCase )
# project dropout
self.net.append(nn.Dropout(UpperCAmelCase ) )
# project out
self.net.append(nn.Linear(UpperCAmelCase , UpperCAmelCase ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCAmelCase ) )
def __A ( self : Dict , UpperCAmelCase : int ):
for module in self.net:
A_ = module(UpperCAmelCase )
return hidden_states
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : str = "none" ):
super().__init__()
A_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
A_ = approximate
def __A ( self : str , UpperCAmelCase : Dict ):
if gate.device.type != "mps":
return F.gelu(UpperCAmelCase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __A ( self : List[Any] , UpperCAmelCase : Any ):
A_ = self.proj(UpperCAmelCase )
A_ = self.gelu(UpperCAmelCase )
return hidden_states
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : int ):
super().__init__()
A_ = nn.Linear(UpperCAmelCase , dim_out * 2 )
def __A ( self : Tuple , UpperCAmelCase : Tuple ):
if gate.device.type != "mps":
return F.gelu(UpperCAmelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def __A ( self : int , UpperCAmelCase : str ):
A_ , A_ = self.proj(UpperCAmelCase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCAmelCase )
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : int ):
super().__init__()
A_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
def __A ( self : int , UpperCAmelCase : int ):
A_ = self.proj(UpperCAmelCase )
return x * torch.sigmoid(1.702 * x )
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : int ):
super().__init__()
A_ = nn.Embedding(UpperCAmelCase , UpperCAmelCase )
A_ = nn.SiLU()
A_ = nn.Linear(UpperCAmelCase , embedding_dim * 2 )
A_ = nn.LayerNorm(UpperCAmelCase , elementwise_affine=UpperCAmelCase )
def __A ( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any ):
A_ = self.linear(self.silu(self.emb(UpperCAmelCase ) ) )
A_ , A_ = torch.chunk(UpperCAmelCase , 2 )
A_ = self.norm(UpperCAmelCase ) * (1 + scale) + shift
return x
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] ):
super().__init__()
A_ = CombinedTimestepLabelEmbeddings(UpperCAmelCase , UpperCAmelCase )
A_ = nn.SiLU()
A_ = nn.Linear(UpperCAmelCase , 6 * embedding_dim , bias=UpperCAmelCase )
A_ = nn.LayerNorm(UpperCAmelCase , elementwise_affine=UpperCAmelCase , eps=1E-6 )
def __A ( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int]=None ):
A_ = self.linear(self.silu(self.emb(UpperCAmelCase , UpperCAmelCase , hidden_dtype=UpperCAmelCase ) ) )
A_ , A_ , A_ , A_ , A_ , A_ = emb.chunk(6 , dim=1 )
A_ = self.norm(UpperCAmelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : float = 1E-5 ):
super().__init__()
A_ = num_groups
A_ = eps
if act_fn is None:
A_ = None
else:
A_ = get_activation(UpperCAmelCase )
A_ = nn.Linear(UpperCAmelCase , out_dim * 2 )
def __A ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ):
if self.act:
A_ = self.act(UpperCAmelCase )
A_ = self.linear(UpperCAmelCase )
A_ = emb[:, :, None, None]
A_ , A_ = emb.chunk(2 , dim=1 )
A_ = F.group_norm(UpperCAmelCase , self.num_groups , eps=self.eps )
A_ = x * (1 + scale) + shift
return x
| 367 |
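The FeedForward block above defaults to the GEGLU path: project to twice the inner width, split in half, and gate one half with GELU of the other. A compact standalone PyTorch sketch of that block:

import torch
import torch.nn.functional as F
from torch import nn

class ToyGEGLU(nn.Module):
    def __init__(self, dim, inner_dim):
        super().__init__()
        self.proj = nn.Linear(dim, inner_dim * 2)
    def forward(self, x):
        h, gate = self.proj(x).chunk(2, dim=-1)  # split the doubled projection
        return h * F.gelu(gate)

ff = nn.Sequential(ToyGEGLU(64, 256), nn.Dropout(0.0), nn.Linear(256, 64))
out = ff(torch.randn(2, 10, 64))
print(out.shape)  # torch.Size([2, 10, 64])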
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :Dict = logging.get_logger(__name__)
__a :int = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : List[Any] = 'realm'
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
# Common config
A_ = vocab_size
A_ = max_position_embeddings
A_ = hidden_size
A_ = retriever_proj_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = num_candidates
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = initializer_range
A_ = type_vocab_size
A_ = layer_norm_eps
# Reader config
A_ = span_hidden_size
A_ = max_span_width
A_ = reader_layer_norm_eps
A_ = reader_beam_size
A_ = reader_seq_len
# Retrieval config
A_ = num_block_records
A_ = searcher_beam_size
| 329 | 0 |
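A short usage sketch for the config above; assuming the public transformers API, a PretrainedConfig subclass like this round-trips through config.json:

from transformers import RealmConfig

config = RealmConfig(num_candidates=4, reader_beam_size=3)
config.save_pretrained("./realm-config")                # writes ./realm-config/config.json
reloaded = RealmConfig.from_pretrained("./realm-config")
assert reloaded.num_candidates == 4 and reloaded.reader_beam_size == 3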
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = JukeboxTokenizer
_lowerCamelCase : Optional[Any] = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def __A ( self : Tuple ):
import torch
A_ = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
A_ = tokenizer(**self.metas )["input_ids"]
# fmt: off
A_ = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def __A ( self : int ):
import torch
A_ = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
A_ = tokenizer(**self.metas )["input_ids"]
# fmt: off
A_ = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 368 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a :Optional[Any] = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = original_name.split("." )[0]
A_ = key.split("." )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] )
A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] )
A_ = orig_block_num - offset
A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def __snake_case ( __UpperCamelCase : Any ):
"""simple docstring"""
A_ = OrderedDict()
A_ , A_ = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
A_ = key.replace("network" ,"poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
A_ = key[: key.find("proj" )]
A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' )
A_ = key.replace("proj" ,"projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
A_ = "poolformer.encoder." + key
if "mlp.fc1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" )
if "mlp.fc2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" )
if "norm1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" )
if "norm2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" )
if "layer_scale_1" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" )
if "layer_scale_2" in key:
A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" )
if "head" in key:
A_ = key.replace("head" ,"classifier" )
A_ = value
return new_state_dict
def __snake_case ( ):
"""simple docstring"""
A_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return image
@torch.no_grad()
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = PoolFormerConfig()
# set attributes based on model_name
A_ = "huggingface/label-files"
A_ = model_name[-3:]
A_ = 1000
A_ = "imagenet-1k-id2label.json"
A_ = (1, 1000)
# set config attributes
A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) )
    A_ = {int(k ): v for k, v in idalabel.items()}
A_ = idalabel
A_ = {v: k for k, v in idalabel.items()}
if size == "s12":
A_ = [2, 2, 6, 2]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s24":
A_ = [4, 4, 12, 4]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 0.9
elif size == "s36":
A_ = [6, 6, 18, 6]
A_ = [64, 128, 320, 512]
A_ = 4.0
A_ = 1E-6
A_ = 0.9
elif size == "m36":
A_ = [6, 6, 18, 6]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
elif size == "m48":
A_ = [8, 8, 24, 8]
A_ = [96, 192, 384, 768]
A_ = 4.0
A_ = 1E-6
A_ = 0.95
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
# Prepare image
A_ = prepare_img()
A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) )
# rename keys
A_ = rename_keys(__UpperCamelCase )
# create HuggingFace model and load state dict
A_ = PoolFormerForImageClassification(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
# Define image processor
A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase )
A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values
# forward pass
A_ = model(__UpperCamelCase )
A_ = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
A_ = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
A_ = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
A_ = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
A_ = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__a :int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 0 |
def __snake_case ( __UpperCamelCase : int = 5000_0000 ):
"""simple docstring"""
A_ = set()
A_ = int((limit - 24) ** (1 / 2) )
A_ = set(range(3 ,prime_square_limit + 1 ,2 ) )
primes.add(2 )
for p in range(3 ,prime_square_limit + 1 ,2 ):
if p not in primes:
continue
        primes.difference_update(set(range(p * p ,prime_square_limit + 1 ,p ) ) )
for primea in primes:
A_ = primea * primea
for primea in primes:
A_ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
A_ = primea * primea * primea * primea
A_ = square + cube + tetr
if total >= limit:
break
ret.add(__UpperCamelCase )
return len(__UpperCamelCase )
if __name__ == "__main__":
print(F"{solution() = }")
| 369 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : torch.FloatTensor
_lowerCamelCase : Optional[torch.FloatTensor] = None
def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCamelCase : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCamelCase : int ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
A_ = []
for i in range(__UpperCamelCase ):
A_ = i / num_diffusion_timesteps
A_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) )
return torch.tensor(__UpperCamelCase ,dtype=torch.floataa )
class _a ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
A_ = betas_for_alpha_bar(UpperCAmelCase )
A_ = 1.0 - self.betas
A_ = torch.cumprod(self.alphas , dim=0 )
A_ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
A_ = 1.0
# setable values
A_ = None
A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() )
A_ = variance_type
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ):
return sample
def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ):
A_ = num_inference_steps
A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase )
def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ):
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
A_ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
A_ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) )
A_ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
A_ = variance.log()
A_ = beta.log()
A_ = (predicted_variance + 1) / 2
A_ = frac * max_log + (1 - frac) * min_log
return variance
def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ):
A_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 )
else:
A_ = None
# 1. compute alphas, betas
if prev_timestep is None:
A_ = t - 1
A_ = self.alphas_cumprod[t]
A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
A_ = 1 - alpha_prod_t
A_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
A_ = self.betas[t]
A_ = self.alphas[t]
else:
A_ = 1 - alpha_prod_t / alpha_prod_t_prev
A_ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
A_ = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
A_ = torch.clamp(
UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A_ = 0
if t > 0:
A_ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device )
A_ = self._get_variance(
UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
A_ = variance
elif self.variance_type == "learned_range":
A_ = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
" for the UnCLIPScheduler." )
A_ = variance * variance_noise
A_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
A_ = timesteps.to(original_samples.device )
A_ = alphas_cumprod[timesteps] ** 0.5
A_ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_alpha_prod.unsqueeze(-1 )
A_ = (1 - alphas_cumprod[timesteps]) ** 0.5
A_ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 329 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class _a :
"""simple docstring"""
_lowerCamelCase : int
_lowerCamelCase : str
class _a ( snake_case_ ):
"""simple docstring"""
def __A ( self : Dict ):
A_ = {}
A_ = []
A_ = 1
A_ = [1, 2]
A_ = {"a": 1, "b": 2}
A_ = {"a": [1, 2], "b": [3, 4]}
A_ = {"a": {"1": 1}, "b": 2}
A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
A_ = {}
A_ = []
A_ = 2
A_ = [2, 3]
A_ = {"a": 2, "b": 3}
A_ = {"a": [2, 3], "b": [4, 5]}
A_ = {"a": {"1": 2}, "b": 3}
A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
A_ = 2
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
A_ = {"a": 2, "b": 0, "c": 2}
A_ = {
"a": np.eye(2 ).astype(UpperCAmelCase ),
"b": np.zeros(3 ).astype(UpperCAmelCase ),
"c": np.ones(2 ).astype(UpperCAmelCase ),
}
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(UpperCAmelCase ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase )
def __A ( self : List[str] ):
A_ = {"a": 1, "b": 2}
A_ = {"a": 3, "b": 4}
A_ = {"a": 5, "b": 6}
A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase )
def __A ( self : Any ):
class _a :
"""simple docstring"""
_lowerCamelCase : int = 'bar'
A_ = Foo()
self.assertEqual(foo.my_attr , "bar" )
with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ):
self.assertEqual(foo.my_attr , "BAR" )
self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ):
"""simple docstring"""
with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
"datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
A_ = {f'''{i}''': i for i in range(__UpperCamelCase )}
        A_ = map_nested(lambda x : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _a ( snake_case_ ):
"""simple docstring"""
@require_tf
def __A ( self : Union[str, Any] ):
import tensorflow as tf
from tensorflow.keras import layers
A_ = layers.Dense(2 )
def gen_random_output():
A_ = tf.random.uniform((1, 3) )
return model(UpperCAmelCase ).numpy()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __A ( self : Optional[int] ):
import torch
def gen_random_output():
A_ = torch.nn.Linear(3 , 2 )
A_ = torch.rand(1 , 3 )
return model(UpperCAmelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
with temp_seed(42 , set_pytorch=UpperCAmelCase ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __A ( self : Any ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
A_ = gen_random_output()
with temp_seed(42 ):
A_ = gen_random_output()
A_ = gen_random_output()
np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("input_data" ,[{}] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" ,[
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] ,)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = NestedDataStructure(__UpperCamelCase ).flatten()
assert output == expected_output
def __snake_case ( ):
"""simple docstring"""
A_ = A(x=1 ,y="foobar" )
A_ = {"x": 1, "y": "foobar"}
assert asdict(__UpperCamelCase ) == expected_output
A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]}
A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
assert asdict(__UpperCamelCase ) == expected_output
with pytest.raises(__UpperCamelCase ):
asdict([1, A(x=10 ,y="foo" )] )
def __snake_case ( __UpperCamelCase : str ):
"""simple docstring"""
return text.split()
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __snake_case ( ):
"""simple docstring"""
with Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
assert out.count("hello" ) == 10
assert out.count("there" ) == 10
assert len(__UpperCamelCase ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
A_ = []
for yield_time, content in iflatmap_unordered(
__UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(__UpperCamelCase )
assert out.count("a" ) == 2
assert out.count("b" ) == 2
assert len(__UpperCamelCase ) == 4
| 370 |
from math import isqrt, loga
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = [True] * max_number
for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2 ,__UpperCamelCase ,i ):
A_ = False
return [i for i in range(2 ,__UpperCamelCase ) if is_prime[i]]
def __snake_case ( __UpperCamelCase : int = 80_0800 ,__UpperCamelCase : int = 80_0800 ):
"""simple docstring"""
    A_ = __UpperCamelCase * loga(__UpperCamelCase )
A_ = int(__UpperCamelCase )
A_ = calculate_prime_numbers(__UpperCamelCase )
A_ = 0
A_ = 0
A_ = len(__UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 329 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a :Optional[int] = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Any = ['FunnelTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :int = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :str = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__a :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 371 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__a :str = logging.get_logger(__name__)
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = RobertaPreLayerNormConfig.from_pretrained(
__UpperCamelCase ,architectures=["RobertaPreLayerNormForMaskedLM"] )
# convert state_dict
A_ = torch.load(hf_hub_download(repo_id=__UpperCamelCase ,filename="pytorch_model.bin" ) )
A_ = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("roberta." ):
A_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
continue
A_ = tensor_value
A_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__UpperCamelCase ,config=__UpperCamelCase ,state_dict=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
# convert tokenizer
A_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a :Any = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 329 | 0 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
UpperCamelCase__ : Any = logging.get_logger("""transformers.models.speecht5""")
UpperCamelCase__ : Union[str, Any] = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
UpperCamelCase__ : Tuple = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
UpperCamelCase__ : Any = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
UpperCamelCase__ : List[str] = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
UpperCamelCase__ : Tuple = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
UpperCamelCase__ : Tuple = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
UpperCamelCase__ : Optional[int] = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
UpperCamelCase__ : Optional[Any] = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
UpperCamelCase__ : Optional[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
UpperCamelCase__ : Union[str, Any] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
UpperCamelCase__ : Optional[int] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
UpperCamelCase__ : Any = []
UpperCamelCase__ : Optional[int] = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
UpperCamelCase__ : List[Any] = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
UpperCamelCase__ : Dict = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
UpperCamelCase__ : Any = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> str:
"""simple docstring"""
for attribute in key.split('''.''' ):
a = getattr(snake_case_, snake_case_ )
if weight_type is not None:
a = getattr(snake_case_, snake_case_ ).shape
else:
a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a = value
elif weight_type == "weight_g":
a = value
elif weight_type == "weight_v":
a = value
elif weight_type == "bias":
a = value
elif weight_type == "running_mean":
a = value
elif weight_type == "running_var":
a = value
elif weight_type == "num_batches_tracked":
a = value
else:
a = value
logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
a , a = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> Optional[int]:
"""simple docstring"""
a = []
if task == "s2t":
a = hf_model.speechta.encoder.prenet.feature_encoder
a = MAPPING_S2T
a = IGNORE_KEYS_S2T
elif task == "t2s":
a = None
a = MAPPING_T2S
a = IGNORE_KEYS_T2S
elif task == "s2s":
a = hf_model.speechta.encoder.prenet.feature_encoder
a = MAPPING_S2S
a = IGNORE_KEYS_S2S
else:
raise ValueError(f"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(snake_case_, snake_case_ ):
logger.info(f"""{name} was ignored""" )
continue
a = False
if "conv_layers" in name:
load_conv_layer(
snake_case_, snake_case_, snake_case_, snake_case_, hf_model.config.feat_extract_norm == '''group''', )
a = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
a , a = key.split('''.*.''' )
if prefix in name and suffix in name:
a = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
a = True
if "*" in mapped_key:
a = name.split(snake_case_ )[0].split('''.''' )[-2]
a = mapped_key.replace('''*''', snake_case_ )
if "weight_g" in name:
a = '''weight_g'''
elif "weight_v" in name:
a = '''weight_v'''
elif "bias" in name:
a = '''bias'''
elif "weight" in name:
a = '''weight'''
elif "running_mean" in name:
a = '''running_mean'''
elif "running_var" in name:
a = '''running_var'''
elif "num_batches_tracked" in name:
a = '''num_batches_tracked'''
else:
a = None
set_recursively(snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Union[str, Any]:
"""simple docstring"""
a = full_name.split('''conv_layers.''' )[-1]
a = name.split('''.''' )
a = int(items[0] )
a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
a = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
a = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
a = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
a = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_=None, snake_case_=None, snake_case_=None, ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
a = SpeechTaConfig.from_pretrained(snake_case_ )
else:
a = SpeechTaConfig()
if task == "s2t":
a = config.max_text_positions
a = SpeechTaForSpeechToText(snake_case_ )
elif task == "t2s":
a = 1_8_7_6
a = 6_0_0
a = config.max_speech_positions
a = SpeechTaForTextToSpeech(snake_case_ )
elif task == "s2s":
a = 1_8_7_6
a = config.max_speech_positions
a = SpeechTaForSpeechToSpeech(snake_case_ )
else:
raise ValueError(f"""Unknown task name: {task}""" )
if vocab_path:
a = SpeechTaTokenizer(snake_case_, model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
a = AddedToken('''<mask>''', lstrip=snake_case_, rstrip=snake_case_ )
a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
a = SpeechTaFeatureExtractor()
a = SpeechTaProcessor(tokenizer=snake_case_, feature_extractor=snake_case_ )
processor.save_pretrained(snake_case_ )
a = torch.load(snake_case_ )
recursively_load_weights(fairseq_checkpoint['''model'''], snake_case_, snake_case_ )
model.save_pretrained(snake_case_ )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(snake_case_ )
model.push_to_hub(snake_case_ )
if __name__ == "__main__":
UpperCamelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
UpperCamelCase__ : Any = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 330 |
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> int:
"""simple docstring"""
a = ''''''
for i in table:
res += inp[i - 1]
return res
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
"""simple docstring"""
return data[1:] + data[0]
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[str]:
"""simple docstring"""
a = ''''''
for i in range(len(snake_case_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Dict:
"""simple docstring"""
a = int('''0b''' + data[0] + data[-1], 2 )
a = int('''0b''' + data[1:3], 2 )
return bin(s[row][col] )[2:]
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Optional[int]:
"""simple docstring"""
a = message[:4]
a = message[4:]
a = apply_table(snake_case_, snake_case_ )
a = xor(snake_case_, snake_case_ )
a = apply_sbox(snake_case_, temp[:4] ) # noqa: E741
a = apply_sbox(snake_case_, temp[4:] )
a = '''0''' * (2 - len(snake_case_ )) + l # noqa: E741
a = '''0''' * (2 - len(snake_case_ )) + r
a = apply_table(l + r, snake_case_ )
a = xor(snake_case_, snake_case_ )
return temp + right
if __name__ == "__main__":
UpperCamelCase__ : int = input("""Enter 10 bit key: """)
UpperCamelCase__ : Union[str, Any] = input("""Enter 8 bit message: """)
UpperCamelCase__ : Dict = [6, 3, 7, 4, 8, 5, 10, 9]
UpperCamelCase__ : Union[str, Any] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
UpperCamelCase__ : Optional[int] = [2, 4, 3, 1]
UpperCamelCase__ : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7]
UpperCamelCase__ : str = [4, 1, 3, 5, 7, 2, 8, 6]
UpperCamelCase__ : List[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
UpperCamelCase__ : int = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
UpperCamelCase__ : Dict = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
UpperCamelCase__ : Optional[Any] = apply_table(key, paa_table)
UpperCamelCase__ : str = temp[:5]
UpperCamelCase__ : List[Any] = temp[5:]
UpperCamelCase__ : Dict = left_shift(left)
UpperCamelCase__ : Any = left_shift(right)
UpperCamelCase__ : Optional[Any] = apply_table(left + right, pa_table)
UpperCamelCase__ : List[str] = left_shift(left)
UpperCamelCase__ : int = left_shift(right)
UpperCamelCase__ : List[str] = left_shift(left)
UpperCamelCase__ : Dict = left_shift(right)
UpperCamelCase__ : List[str] = apply_table(left + right, pa_table)
# encryption
UpperCamelCase__ : Tuple = apply_table(message, IP)
UpperCamelCase__ : Optional[Any] = function(expansion, sa, sa, keya, temp)
UpperCamelCase__ : Optional[int] = temp[4:] + temp[:4]
UpperCamelCase__ : Any = function(expansion, sa, sa, keya, temp)
UpperCamelCase__ : Tuple = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
UpperCamelCase__ : Union[str, Any] = apply_table(CT, IP)
UpperCamelCase__ : List[str] = function(expansion, sa, sa, keya, temp)
UpperCamelCase__ : Optional[Any] = temp[4:] + temp[:4]
UpperCamelCase__ : Optional[int] = function(expansion, sa, sa, keya, temp)
UpperCamelCase__ : Any = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT)
| 330 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_=None, snake_case_=None ) -> str:
"""simple docstring"""
if "." in tensor_name:
a = tensor_name.split('''.''' )
for split in splits[:-1]:
a = getattr(snake_case_, snake_case_ )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
a = new_module
a = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
a = tensor_name in module._buffers
a = getattr(snake_case_, snake_case_ )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
a = False
a = False
if is_buffer or not is_bitsandbytes_available():
a = False
a = False
else:
a = hasattr(bnb.nn, '''Params4bit''' ) and isinstance(module._parameters[tensor_name], bnb.nn.Paramsabit )
a = isinstance(module._parameters[tensor_name], bnb.nn.IntaParams )
if is_abit or is_abit:
a = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
a = old_value.to(snake_case_ )
elif isinstance(snake_case_, torch.Tensor ):
a = value.to('''cpu''' )
if value.dtype == torch.inta:
a = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
a = torch.tensor(snake_case_, device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls, snake_case_ ) and fpaa_statistics is None:
a = new_value.T
a = old_value.__dict__
if is_abit:
a = bnb.nn.IntaParams(snake_case_, requires_grad=snake_case_, **snake_case_ ).to(snake_case_ )
elif is_abit:
a = bnb.nn.Paramsabit(snake_case_, requires_grad=snake_case_, **snake_case_ ).to(snake_case_ )
a = new_value
if fpaa_statistics is not None:
setattr(module.weight, '''SCB''', fpaa_statistics.to(snake_case_ ) )
else:
if value is None:
a = old_value.to(snake_case_ )
elif isinstance(snake_case_, torch.Tensor ):
a = value.to(snake_case_ )
else:
a = torch.tensor(snake_case_, device=snake_case_ )
if is_buffer:
a = new_value
else:
a = nn.Parameter(snake_case_, requires_grad=old_value.requires_grad )
a = new_value
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_=None, snake_case_=None, snake_case_=None, snake_case_=False ) -> Optional[int]:
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
a = []
current_key_name.append(snake_case_ )
if (isinstance(snake_case_, nn.Linear ) or isinstance(snake_case_, snake_case_ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(snake_case_ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(snake_case_, snake_case_ ):
a , a = module.weight.shape
else:
a = module.in_features
a = module.out_features
if quantization_config.quantization_method() == "llm_int8":
a = bnb.nn.LinearabitLt(
snake_case_, snake_case_, module.bias is not None, has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight, threshold=quantization_config.llm_inta_threshold, )
a = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
a = bnb.nn.Linearabit(
snake_case_, snake_case_, module.bias is not None, quantization_config.bnb_abit_compute_dtype, compress_statistics=quantization_config.bnb_abit_use_double_quant, quant_type=quantization_config.bnb_abit_quant_type, )
a = True
# Store the module class in case we need to transpose the weight later
a = type(snake_case_ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(snake_case_ )
if len(list(module.children() ) ) > 0:
a , a = _replace_with_bnb_linear(
snake_case_, snake_case_, snake_case_, snake_case_, has_been_replaced=snake_case_, )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_=None, snake_case_=None, snake_case_=None ) -> Optional[Any]:
"""simple docstring"""
a = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
a , a = _replace_with_bnb_linear(
snake_case_, snake_case_, snake_case_, snake_case_ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
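# Hedged usage sketch (editor addition, not part of the original module).
# `replace_with_bnb_linear` (the public name of the wrapper above) swaps every
# eligible `nn.Linear` for a bitsandbytes layer chosen by the quantization config.
# `BitsAndBytesConfig` is the public transformers API; the toy model is illustrative,
# and a CUDA-enabled `bitsandbytes` install is assumed.
if __name__ == "__main__":  # illustrative only
    import torch.nn as nn
    from transformers import BitsAndBytesConfig

    toy = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 2))
    toy = replace_with_bnb_linear(toy, quantization_config=BitsAndBytesConfig(load_in_8bit=True))
    print(type(toy[0]).__name__)  # expected: Linear8bitLt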
def SCREAMING_SNAKE_CASE__ ( *snake_case_, **snake_case_ ) -> List[str]:
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''', snake_case_, )
return replace_with_bnb_linear(*snake_case_, **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( *snake_case_, **snake_case_ ) -> int:
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''', snake_case_, )
return set_module_quantized_tensor_to_device(*snake_case_, **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Any:
"""simple docstring"""
a = deepcopy(snake_case_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
a = find_tied_parameters(snake_case_ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case_, snake_case_ ):
a = sum(list(tied_params.values() ), [] ) + list(tied_params.keys() )
else:
a = sum(snake_case_, [] )
a = len(snake_case_ ) > 0
# Check if it is a base model
a = not hasattr(snake_case_, model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
a = list(model.named_children() )
a = [list_modules[-1][0]]
# add last module together with tied weights
a = set(snake_case_ ) - set(snake_case_ )
a = list(set(snake_case_ ) ) + list(snake_case_ )
# remove ".weight" from the keys
a = ['''.weight''', '''.bias''']
a = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
a = name.replace(snake_case_, '''''' )
filtered_module_names.append(snake_case_ )
return filtered_module_names
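# Hedged usage sketch (editor addition, not part of the original module). The helper
# above (`get_keys_to_not_convert` in the public transformers API) returns module
# names that should stay in full precision: the output head plus any parameters tied
# to it. The tiny Hub checkpoint below is illustrative.
if __name__ == "__main__":  # illustrative only; downloads a small checkpoint
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
    print(get_keys_to_not_convert(model))  # typically ["lm_head"]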
| 330 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
a = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
a = '''The dog is cute and lives in the garden house'''
a = jnp.array([tokenizer.encode(__lowerCamelCase )] )
a = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
a = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
a = model(__lowerCamelCase )['''last_hidden_state''']
self.assertEqual(output.shape ,__lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] ,__lowerCamelCase ,atol=1e-3 ) )
| 330 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
UpperCamelCase__ : List[Any] = {
"""User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def SCREAMING_SNAKE_CASE__ ( snake_case_ = "dhaka", snake_case_ = 5 ) -> int:
"""simple docstring"""
a = min(snake_case_, 5_0 ) # Prevent abuse!
a = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
a = requests.get('''https://www.google.com/search''', params=snake_case_, headers=snake_case_ )
a = BeautifulSoup(html.text, '''html.parser''' )
a = ''''''.join(
re.findall(r'''AF_initDataCallback\(([^<]+)\);''', str(soup.select('''script''' ) ) ) )
a = json.dumps(snake_case_ )
a = json.loads(snake_case_ )
a = re.findall(
r'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''', snake_case_, )
if not matched_google_image_data:
return 0
a = re.sub(
r'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''', '''''', str(snake_case_ ), )
a = re.findall(
r'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''', snake_case_, )
for index, fixed_full_res_image in enumerate(snake_case_ ):
if index >= max_images:
return index
a = bytes(snake_case_, '''ascii''' ).decode(
'''unicode-escape''' )
a = bytes(snake_case_, '''ascii''' ).decode(
'''unicode-escape''' )
a = urllib.request.build_opener()
a = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(snake_case_ )
a = f"""query_{query.replace(" ", "_" )}"""
if not os.path.exists(snake_case_ ):
os.makedirs(snake_case_ )
urllib.request.urlretrieve( # noqa: S310
snake_case_, f"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
UpperCamelCase__ : Dict = download_images_from_google_query(sys.argv[1])
print(F"{image_count} images were downloaded to disk.")
except IndexError:
print("""Please provide a search term.""")
raise
| 330 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase__ : Union[str, Any] = 16
UpperCamelCase__ : Dict = 32
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ = 1_6 ) -> Tuple:
"""simple docstring"""
a = AutoTokenizer.from_pretrained('''bert-base-cased''' )
a = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(snake_case_ ):
# max_length=None => use the model max length (it's actually the default)
a = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=snake_case_, max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
a = datasets.map(
snake_case_, batched=snake_case_, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(snake_case_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
a = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
a = 1_6
elif accelerator.mixed_precision != "no":
a = 8
else:
a = None
return tokenizer.pad(
snake_case_, padding='''longest''', max_length=snake_case_, pad_to_multiple_of=snake_case_, return_tensors='''pt''', )
# Instantiate dataloaders.
a = DataLoader(
tokenized_datasets['''train'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ )
a = DataLoader(
tokenized_datasets['''validation'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase__ : int = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]:
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', snake_case_ ) == "1":
a = 2
# Initialize accelerator
a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a = config['''lr''']
a = int(config['''num_epochs'''] )
a = int(config['''seed'''] )
a = int(config['''batch_size'''] )
a = evaluate.load('''glue''', '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=snake_case_ )
def inner_training_loop(snake_case_ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=snake_case_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a = model.to(accelerator.device )
# Instantiate optimizer
a = AdamW(params=model.parameters(), lr=snake_case_ )
a , a = get_dataloaders(snake_case_, snake_case_ )
# Instantiate scheduler
a = get_linear_schedule_with_warmup(
optimizer=snake_case_, num_warmup_steps=1_0_0, num_training_steps=(len(snake_case_ ) * num_epochs), )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a , a , a , a , a = accelerator.prepare(
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ )
# Now we train the model
for epoch in range(snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
a = model(**snake_case_ )
a = outputs.loss
accelerator.backward(snake_case_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a = model(**snake_case_ )
a = outputs.logits.argmax(dim=-1 )
a , a = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=snake_case_, references=snake_case_, )
a = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""", snake_case_ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
"""simple docstring"""
a = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''', type=snake_case_, default=snake_case_, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''', )
parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' )
a = parser.parse_args()
a = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(snake_case_, snake_case_ )
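# Hedged sketch (editor addition, not part of the original script). How the decorator
# behaves in isolation: `find_executable_batch_size` re-runs the wrapped function with
# a halved batch size each time it raises an out-of-memory error. The RuntimeError
# text below imitates CUDA's OOM message so the decorator's matching logic triggers;
# everything here is illustrative (call `_demo_find_executable_batch_size()` to run it).
def _demo_find_executable_batch_size():
    attempts = []

    @find_executable_batch_size(starting_batch_size=6_4)
    def run(batch_size):
        attempts.append(batch_size)
        if batch_size > 1_6:
            raise RuntimeError("CUDA out of memory.")  # simulated OOM
        return batch_size

    assert run() == 1_6
    assert attempts == [6_4, 3_2, 1_6]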
if __name__ == "__main__":
main()
| 330 | 1 |
import os
import pytest
from attr import dataclass
UpperCamelCase__ : int = """us-east-1""" # defaults region
@dataclass
class lowerCamelCase_ :
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
SCREAMING_SNAKE_CASE_ = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_00,
'save_steps': 55_00,
}
SCREAMING_SNAKE_CASE_ = {**hyperparameters, 'max_steps': 10_00}
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
return F"""{self.framework}-transfromers-test"""
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''' )
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]:
"""simple docstring"""
a = SageMakerTestEnvironment(framework=request.cls.framework )
| 330 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : str = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
UpperCamelCase__ : Optional[Any] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> List[Any]:
"""simple docstring"""
for attribute in key.split('''.''' ):
a = getattr(snake_case_, snake_case_ )
if weight_type is not None:
a = getattr(snake_case_, snake_case_ ).shape
else:
a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
a = value
elif weight_type == "weight_g":
a = value
elif weight_type == "weight_v":
a = value
elif weight_type == "bias":
a = value
else:
a = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Union[str, Any]:
"""simple docstring"""
a = []
a = fairseq_model.state_dict()
a = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
a = False
if "conv_layers" in name:
load_conv_layer(
snake_case_, snake_case_, snake_case_, snake_case_, hf_model.config.feat_extract_norm == '''group''', )
a = True
else:
for key, mapped_key in MAPPING.items():
a = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
a = True
if "*" in mapped_key:
a = name.split(snake_case_ )[0].split('''.''' )[-2]
a = mapped_key.replace('''*''', snake_case_ )
if "weight_g" in name:
a = '''weight_g'''
elif "weight_v" in name:
a = '''weight_v'''
elif "bias" in name:
a = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a = '''weight'''
else:
a = None
set_recursively(snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Union[str, Any]:
"""simple docstring"""
a = full_name.split('''conv_layers.''' )[-1]
a = name.split('''.''' )
a = int(items[0] )
a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
a = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
a = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
a = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
a = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=None, snake_case_=None, snake_case_=True ) -> Union[str, Any]:
"""simple docstring"""
if config_path is not None:
a = UniSpeechSatConfig.from_pretrained(snake_case_ )
else:
a = UniSpeechSatConfig()
a = ''''''
if is_finetuned:
a = UniSpeechSatForCTC(snake_case_ )
else:
a = UniSpeechSatForPreTraining(snake_case_ )
a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
a = model[0].eval()
recursively_load_weights(snake_case_, snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCamelCase__ : int = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 330 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ : List[str] = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase_ ( a_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = SpeechTaTokenizer
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
a = SpeechTaTokenizer(__lowerCamelCase )
a = AddedToken('''<mask>''' ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase )
a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : Dict ):
'''simple docstring'''
a = '''this is a test'''
a = '''this is a test'''
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : str=False ,__lowerCamelCase : Tuple=20 ,__lowerCamelCase : Any=5 ):
'''simple docstring'''
a , a = self.get_input_output_texts(__lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase )
a = tokenizer.decode(__lowerCamelCase ,clean_up_tokenization_spaces=__lowerCamelCase )
return text, ids
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
a = '''<pad>'''
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) ,__lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) ,__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''<s>''' )
self.assertEqual(vocab_keys[1] ,'''<pad>''' )
self.assertEqual(vocab_keys[-4] ,'''œ''' )
self.assertEqual(vocab_keys[-2] ,'''<mask>''' )
self.assertEqual(vocab_keys[-1] ,'''<ctc_blank>''' )
self.assertEqual(len(__lowerCamelCase ) ,81 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
a = self.get_tokenizers(do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
a = tokenizer.vocab_size
a = len(__lowerCamelCase )
self.assertNotEqual(__lowerCamelCase ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
a = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
a = tokenizer.add_tokens(__lowerCamelCase )
a = tokenizer.vocab_size
a = len(__lowerCamelCase )
self.assertNotEqual(__lowerCamelCase ,0 )
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,len(__lowerCamelCase ) )
self.assertEqual(__lowerCamelCase ,all_size + len(__lowerCamelCase ) )
a = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' ,add_special_tokens=__lowerCamelCase )
self.assertGreaterEqual(len(__lowerCamelCase ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
a = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
a = tokenizer.add_special_tokens(__lowerCamelCase )
a = tokenizer.vocab_size
a = len(__lowerCamelCase )
self.assertNotEqual(__lowerCamelCase ,0 )
self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
self.assertEqual(__lowerCamelCase ,len(__lowerCamelCase ) )
self.assertEqual(__lowerCamelCase ,all_size_a + len(__lowerCamelCase ) )
a = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' ,add_special_tokens=__lowerCamelCase )
self.assertGreaterEqual(len(__lowerCamelCase ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
a = self.get_tokenizer()
a = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(__lowerCamelCase ,[SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__lowerCamelCase ,[SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
# fmt: off
self.assertListEqual(__lowerCamelCase ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
a = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(
__lowerCamelCase ,[SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
a = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
a = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase ,model_name='''microsoft/speecht5_asr''' ,revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' ,sequences=__lowerCamelCase ,)
| 330 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> List[str]:
"""simple docstring"""
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set() )
@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]:
"""simple docstring"""
class lowerCamelCase_ :
def __init__( self : Dict ,__lowerCamelCase : List[str] ):
'''simple docstring'''
a = metric_id
class lowerCamelCase_ :
SCREAMING_SNAKE_CASE_ = [MetricMock(a_ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock() )
@pytest.mark.parametrize(
'''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Tuple:
"""simple docstring"""
if "tmp_path" in args:
a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(snake_case_, match='''https://huggingface.co/docs/evaluate''' ):
func(*snake_case_ )
| 330 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCamelCase_ ( a_ ):
def __init__( self : str ,__lowerCamelCase : pyspark.sql.DataFrame ,__lowerCamelCase : Optional[NamedSplit] = None ,__lowerCamelCase : Optional[Features] = None ,__lowerCamelCase : bool = True ,__lowerCamelCase : str = None ,__lowerCamelCase : bool = False ,__lowerCamelCase : str = None ,__lowerCamelCase : bool = True ,__lowerCamelCase : str = "arrow" ,**__lowerCamelCase : Optional[Any] ,):
'''simple docstring'''
super().__init__(
split=__lowerCamelCase ,features=__lowerCamelCase ,cache_dir=__lowerCamelCase ,keep_in_memory=__lowerCamelCase ,streaming=__lowerCamelCase ,**__lowerCamelCase ,)
a = load_from_cache_file
a = file_format
a = Spark(
df=__lowerCamelCase ,features=__lowerCamelCase ,cache_dir=__lowerCamelCase ,working_dir=__lowerCamelCase ,**__lowerCamelCase ,)
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
a = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__lowerCamelCase ,file_format=self._file_format ,)
return self.builder.as_dataset(split=self.split )
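# Hedged usage sketch (editor addition, not part of the original module). This reader
# backs `datasets.Dataset.from_spark`: the Spark DataFrame is handed to the `Spark`
# builder, materialized to the chosen file format on `read()`, and returned as a
# `Dataset` (or an iterable dataset when `streaming=True`). The local Spark session
# below is illustrative.
if __name__ == "__main__":  # illustrative only; requires a local Spark runtime
    from datasets import Dataset
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[1]").getOrCreate()
    df = spark.createDataFrame([("hello",), ("world",)], ["text"])
    ds = Dataset.from_spark(df)
    print(ds[0])  # {'text': 'hello'}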
| 330 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = 'luke'
def __init__( self : Dict ,__lowerCamelCase : Optional[Any]=5_02_67 ,__lowerCamelCase : str=50_00_00 ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : int=2_56 ,__lowerCamelCase : Optional[int]=12 ,__lowerCamelCase : Tuple=12 ,__lowerCamelCase : Any=30_72 ,__lowerCamelCase : Any="gelu" ,__lowerCamelCase : Any=0.1 ,__lowerCamelCase : Tuple=0.1 ,__lowerCamelCase : Tuple=5_12 ,__lowerCamelCase : int=2 ,__lowerCamelCase : Optional[int]=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Tuple=None ,__lowerCamelCase : Any=1 ,__lowerCamelCase : Dict=0 ,__lowerCamelCase : Any=2 ,**__lowerCamelCase : str ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase ,bos_token_id=__lowerCamelCase ,eos_token_id=__lowerCamelCase ,**__lowerCamelCase )
a = vocab_size
a = entity_vocab_size
a = hidden_size
a = entity_emb_size
a = num_hidden_layers
a = num_attention_heads
a = hidden_act
a = intermediate_size
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = initializer_range
a = layer_norm_eps
a = use_entity_aware_attention
a = classifier_dropout
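# Hedged usage sketch (editor addition, not part of the original module). Instantiating
# the config above with a smaller entity vocabulary; `LukeConfig` and `LukeModel` are
# the public transformers names, and the sizes below are illustrative.
if __name__ == "__main__":  # illustrative only
    from transformers import LukeConfig, LukeModel

    config = LukeConfig(entity_vocab_size=10_000, entity_emb_size=1_28)
    model = LukeModel(config)
    print(model.config.entity_emb_size)  # 128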
| 330 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
UpperCamelCase__ : Union[str, Any] = None
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase__ : int = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
},
}
UpperCamelCase__ : List[str] = {
"""camembert-base""": 512,
}
UpperCamelCase__ : List[Any] = """▁"""
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE_ = CamembertTokenizer
def __init__( self : Optional[Any] ,__lowerCamelCase : Optional[int]=None ,__lowerCamelCase : List[Any]=None ,__lowerCamelCase : List[Any]="<s>" ,__lowerCamelCase : Optional[Any]="</s>" ,__lowerCamelCase : Union[str, Any]="</s>" ,__lowerCamelCase : int="<s>" ,__lowerCamelCase : Any="<unk>" ,__lowerCamelCase : Optional[int]="<pad>" ,__lowerCamelCase : Optional[int]="<mask>" ,__lowerCamelCase : str=["<s>NOTUSED", "</s>NOTUSED"] ,**__lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase ,tokenizer_file=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,additional_special_tokens=__lowerCamelCase ,**__lowerCamelCase ,)
a = vocab_file
a = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a = [self.cls_token_id]
a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file ,__lowerCamelCase )
return (out_vocab_file,)
| 330 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = pd.read_csv("""sample_data.csv""", header=None)
UpperCamelCase__ : Tuple = df.shape[:1][0]
# If you're using some other dataset input the target column
UpperCamelCase__ : List[Any] = df.iloc[:, 1:2]
UpperCamelCase__ : Union[str, Any] = actual_data.values.reshape(len_data, 1)
UpperCamelCase__ : List[Any] = MinMaxScaler().fit_transform(actual_data)
UpperCamelCase__ : Optional[Any] = 10
UpperCamelCase__ : int = 5
UpperCamelCase__ : List[str] = 20
UpperCamelCase__ : Optional[int] = len_data - periods * look_back
UpperCamelCase__ : Union[str, Any] = actual_data[:division]
UpperCamelCase__ : str = actual_data[division - look_back :]
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = [], []
UpperCamelCase__ , UpperCamelCase__ : str = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
UpperCamelCase__ : List[str] = np.array(train_x)
UpperCamelCase__ : Optional[Any] = np.array(test_x)
UpperCamelCase__ : Tuple = np.array([list(i.ravel()) for i in train_y])
UpperCamelCase__ : Optional[Any] = np.array([list(i.ravel()) for i in test_y])
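    # Hedged sanity check (editor addition, not part of the original). Each training
    # sample pairs `look_back` consecutive scaled prices with the `forward_days`
    # prices that follow, so the arrays built above should have shapes
    # (n, look_back, 1) and (n, forward_days):
    assert x_train.shape[1:] == (look_back, 1)
    assert y_train.shape[1] == forward_days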
UpperCamelCase__ : Union[str, Any] = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))  # second stacked LSTM; its input shape is inferred from the previous layer
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
UpperCamelCase__ : Tuple = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
UpperCamelCase__ : Tuple = model.predict(x_test)
| 330 | 1 |
import os
from datetime import datetime as dt
from github import Github
UpperCamelCase__ : Optional[int] = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
"""simple docstring"""
a = Github(os.environ['''GITHUB_TOKEN'''] )
a = g.get_repo('''huggingface/diffusers''' )
a = repo.get_issues(state='''open''' )
for issue in open_issues:
        a = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True )
a = comments[0] if len(snake_case_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 330 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Tuple:
"""simple docstring"""
a = FileLock(str(tmpdir / '''foo.lock''' ) )
a = FileLock(str(tmpdir / '''foo.lock''' ) )
a = 0.01
with locka.acquire():
with pytest.raises(snake_case_ ):
a = time.time()
locka.acquire(snake_case_ )
assert time.time() - _start > timeout
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]:
"""simple docstring"""
a = '''a''' * 1_0_0_0 + '''.lock'''
a = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('''.lock''' )
assert not locka._lock_file.endswith(snake_case_ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
a = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(snake_case_ ):
locka.acquire(0 )
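# Hedged usage sketch (editor addition, not part of the original tests). The everyday
# FileLock pattern the tests above exercise: a second acquirer either blocks or, when
# given a timeout, raises `Timeout`, which is what the first test asserts via its
# elapsed-time check. Paths and the helper name are illustrative.
def _demo_filelock(tmp_path):
    lock = FileLock(str(tmp_path / "demo.lock"))
    with lock.acquire(timeout=1):
        ...  # critical section; concurrent acquirers wait up to 1 second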
| 330 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Any = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase__ : Any = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
UpperCamelCase__ : Optional[int] = {"""facebook/blenderbot_small-90M""": 512}
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Any:
"""simple docstring"""
a = set()
a = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
a = char
a = set(snake_case_ )
return pairs
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self : Any ,__lowerCamelCase : List[str] ,__lowerCamelCase : List[str] ,__lowerCamelCase : Dict="__start__" ,__lowerCamelCase : Any="__end__" ,__lowerCamelCase : List[Any]="__unk__" ,__lowerCamelCase : Optional[Any]="__null__" ,**__lowerCamelCase : Tuple ,):
'''simple docstring'''
super().__init__(unk_token=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,**__lowerCamelCase )
with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle:
a = json.load(__lowerCamelCase )
a = {v: k for k, v in self.encoder.items()}
with open(__lowerCamelCase ,encoding='''utf-8''' ) as merges_handle:
a = merges_handle.read().split('''\n''' )[1:-1]
a = [tuple(merge.split() ) for merge in merges]
a = dict(zip(__lowerCamelCase ,range(len(__lowerCamelCase ) ) ) )
a = {}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
a = re.sub('''([.,!?()])''' ,r''' \1''' ,__lowerCamelCase )
a = re.sub('''(\')''' ,r''' \1 ''' ,__lowerCamelCase )
a = re.sub(r'''\s{2,}''' ,''' ''' ,__lowerCamelCase )
if "\n" in token:
a = token.replace('''\n''' ,''' __newln__''' )
a = token.split(''' ''' )
a = []
for token in tokens:
if not len(__lowerCamelCase ):
continue
a = token.lower()
a = tuple(__lowerCamelCase )
a = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
a = get_pairs(__lowerCamelCase )
if not pairs:
words.append(__lowerCamelCase )
continue
while True:
a = min(__lowerCamelCase ,key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase ,float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
a , a = bigram
a = []
a = 0
while i < len(__lowerCamelCase ):
try:
a = word.index(__lowerCamelCase ,__lowerCamelCase )
new_word.extend(word[i:j] )
a = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
a = tuple(__lowerCamelCase )
a = new_word
if len(__lowerCamelCase ) == 1:
break
else:
a = get_pairs(__lowerCamelCase )
a = '''@@ '''.join(__lowerCamelCase )
a = word[:-4]
a = word
words.append(__lowerCamelCase )
return " ".join(__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : str ):
'''simple docstring'''
a = []
a = re.findall(r'''\S+\n?''' ,__lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : str ):
'''simple docstring'''
a = token.lower()
return self.encoder.get(__lowerCamelCase ,self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : int ):
'''simple docstring'''
return self.decoder.get(__lowerCamelCase ,self.unk_token )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : List[str] ):
'''simple docstring'''
a = ''' '''.join(__lowerCamelCase ).replace('''@@ ''' ,'''''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
a = os.path.join(
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__lowerCamelCase ,ensure_ascii=__lowerCamelCase ) + '''\n''' )
a = 0
with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
a = token_index
writer.write(''' '''.join(__lowerCamelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
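# Hedged usage sketch (editor addition, not part of the original module). A round trip
# through the tokenizer above, published on the Hub as `facebook/blenderbot_small-90M`:
# the BPE step emits `@@`-joined subword pieces and `convert_tokens_to_string` re-joins
# them. The import assumes the public transformers name for this class.
if __name__ == "__main__":  # illustrative only; downloads the vocab files
    from transformers import BlenderbotSmallTokenizer

    tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    tokens = tok.tokenize("sample text")
    print(tokens)  # '@@'-split subword pieces, e.g. ['sample', 'text']
    print(tok.convert_tokens_to_string(tokens))  # 'sample text'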
| 330 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCamelCase__ : Dict = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = 'vit_mae'
def __init__( self : Dict ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : Optional[Any]=12 ,__lowerCamelCase : List[str]=12 ,__lowerCamelCase : Optional[int]=30_72 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : Union[str, Any]=0.0 ,__lowerCamelCase : Optional[int]=0.0 ,__lowerCamelCase : Dict=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=2_24 ,__lowerCamelCase : str=16 ,__lowerCamelCase : Union[str, Any]=3 ,__lowerCamelCase : Optional[Any]=True ,__lowerCamelCase : Dict=16 ,__lowerCamelCase : List[str]=5_12 ,__lowerCamelCase : int=8 ,__lowerCamelCase : int=20_48 ,__lowerCamelCase : Optional[Any]=0.75 ,__lowerCamelCase : int=False ,**__lowerCamelCase : Any ,):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = initializer_range
a = layer_norm_eps
a = image_size
a = patch_size
a = num_channels
a = qkv_bias
a = decoder_num_attention_heads
a = decoder_hidden_size
a = decoder_num_hidden_layers
a = decoder_intermediate_size
a = mask_ratio
a = norm_pix_loss
| 330 | 1 |