| Column | Type | Range |
| --- | --- | --- |
| code | string | lengths 86–54.5k |
| code_codestyle | int64 | 0–371 |
| style_context | string | lengths 87–49.2k |
| style_context_codestyle | int64 | 0–349 |
| label | int64 | 0–1 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
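A brief usage sketch follows; it assumes the public `transformers` API, with `BertConfig` chosen purely for illustration:

from transformers import BertConfig, EncoderDecoderConfig

encoder = BertConfig()
decoder = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
# the classmethod flips the decoder into cross-attention mode before nesting it
assert config.decoder.is_decoder and config.decoder.add_cross_attention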
---

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}


class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
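The `inputs_to_logits_ratio` property multiplies the conv strides to get the waveform-to-frame downsampling factor. A quick illustrative check, assuming the class is exposed as `transformers.UniSpeechConfig`:

from transformers import UniSpeechConfig

config = UniSpeechConfig()
# default strides (5, 2, 2, 2, 2, 2, 2) multiply to 5 * 2**6 == 320,
# i.e. one logit frame per 320 input samples (~20 ms at 16 kHz)
assert config.inputs_to_logits_ratio == 320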
---

import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
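A minimal usage sketch for the processor; it assumes the public `transformers` API and the `google/owlvit-base-patch32` checkpoint, with a blank image used purely for illustration:

import numpy as np
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
# a nested list supplies one set of text queries per image in the batch
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']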
---

def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with alternating bidirectional bubble passes and return it."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # Backward pass: sink the smallest remaining value toward the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # Forward pass: float the largest remaining value toward the back.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
---

import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps

from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)

        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }

        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)

        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()


@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
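A minimal inference sketch for the pipeline under test; it assumes a CUDA GPU and the public `diffusers` API, and reuses the checkpoint and image exercised by the slow test above:

import torch
from diffusers import UnCLIPImageVariationPipeline
from diffusers.utils import load_image

pipe = UnCLIPImageVariationPipeline.from_pretrained(
    "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
)
variation = pipe(image, output_type="np").images[0]  # a 256x256x3 numpy array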
---

import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask


logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and predictions."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
---

import itertools
import random
import unittest

import numpy as np

from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
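A short usage sketch of the extractor under test; it assumes torchaudio is installed (as the decorators above require) and mirrors the tester's 24-bin configuration, with a random waveform standing in for real audio:

import numpy as np
from transformers import Speech2TextFeatureExtractor

extractor = Speech2TextFeatureExtractor(feature_size=24, num_mel_bins=24, sampling_rate=16000, padding_value=0.0)
waveform = np.random.randn(16000).astype(np.float32)  # one second of synthetic audio
features = extractor(waveform, sampling_rate=16000, return_tensors="np").input_features
# features has shape (batch, frames, num_mel_bins), normalized per utterance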
---

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
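For context, `_LazyModule` replaces the package in `sys.modules` so that importing it is cheap: the submodules named in `_import_structure` are only imported the first time one of their attributes is accessed. A minimal sketch of the observable behaviour, assuming the published `transformers` package:

import transformers.models.mask2former as m2f  # fast: no heavy submodule imported yet
config_cls = m2f.Mask2FormerConfig  # first attribute access triggers the real import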
---

def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that every row and every column is sorted in non-increasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary-search the index of the first negative number in a non-increasing array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting implementations against the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
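The binary-search variant exploits the non-increasing ordering: the bound found in each row can only shrink for the rows below it, so the whole count costs O(m log n) instead of O(mn). A small worked example using the first test fixture, which contains 8 negatives:

example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert count_negatives_binary_search(example) == 8
assert count_negatives_brute_force(example) == 8
assert count_negatives_brute_force_with_break(example) == 8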
---

# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
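# Sanity check for the analytic gradient used above (illustrative only, not part
# of the original script): for the log-loss
#     J(theta) = mean(-y*log(h) - (1-y)*log(1-h))  with  h = sigmoid(x @ theta),
# the gradient is dJ/dtheta = x.T @ (h - y) / y.size. A quick central-difference
# comparison confirms the formula numerically:
def _numeric_gradient_check(x, y, theta, eps=1e-6):
    analytic = np.dot(x.T, sigmoid_function(np.dot(x, theta)) - y) / y.size
    numeric = np.zeros_like(theta)
    for k in range(theta.size):
        step = np.zeros_like(theta)
        step[k] = eps
        numeric[k] = (
            cost_function(sigmoid_function(np.dot(x, theta + step)), y)
            - cost_function(sigmoid_function(np.dot(x, theta - step)), y)
        ) / (2 * eps)
    return np.allclose(analytic, numeric, atol=1e-4)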
# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
---
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
UpperCamelCase = OrderedDict(
[
        # Model for Image Classification mapping
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
UpperCamelCase = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
UpperCamelCase = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
UpperCamelCase = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
UpperCamelCase = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
UpperCamelCase = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
UpperCamelCase = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
UpperCamelCase = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
UpperCamelCase = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
UpperCamelCase = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class snake_case_ ( _BaseAutoModelClass ):
__A : Tuple = FLAX_MODEL_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModel)
class snake_case_ ( _BaseAutoModelClass ):
__A : Optional[int] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class snake_case_ ( _BaseAutoModelClass ):
__A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class snake_case_ ( _BaseAutoModelClass ):
__A : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class snake_case_ ( _BaseAutoModelClass ):
__A : str = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class snake_case_ ( _BaseAutoModelClass ):
__A : Any = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCamelCase = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class snake_case_ ( _BaseAutoModelClass ):
__A : Optional[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class snake_case_ ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class snake_case_ ( _BaseAutoModelClass ):
__A : Optional[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class snake_case_ ( _BaseAutoModelClass ):
__A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCamelCase = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class snake_case_ ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCamelCase = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class snake_case_ ( _BaseAutoModelClass ):
__A : Any = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCamelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class snake_case_ ( _BaseAutoModelClass ):
__A : List[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCamelCase = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
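# Illustrative use of the auto classes defined above (a sketch; it assumes
# Flax weights for the checkpoint exist on the Hub, which they do for
# "bert-base-uncased"):
#
#   model = FlaxAutoModel.from_pretrained("bert-base-uncased")
#   mlm = FlaxAutoModelForMaskedLM.from_pretrained("bert-base-uncased")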
| 333 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__A )
class snake_case_ ( __A ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__A : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__A : ClassVar[Features] = Features({"text": Value("string" )} )
__A : ClassVar[Features] = Features({"labels": ClassLabel} )
__A : str = "text"
__A : str = "labels"
    def align_with_features( self , features ):
        # method, parameter, and local names restored from the body's own references
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
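# Sketch of how this template is exercised upstream in `datasets`
# (the feature names below are illustrative):
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   aligned = template.align_with_features(features)  # returns a copy carrying the real ClassLabel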
| 333 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset") , "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight"))
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias"))
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight"))
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias"))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter'''))
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight'''))
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias'''))
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight'''))
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias'''))
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight'''))
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias'''))
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight'''))
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias'''))
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight'''))
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias'''))
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight'''))
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias'''))
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight'''))
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias'''))
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
])
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn" , "batch_norm")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url , stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="pt").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]])
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]])
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]])
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]])
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]])
    print("Logits:" , outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f'''Pushing model and processor for {model_name} to hub''')
        model.push_to_hub(f'''openmmlab/{model_name}''')
        processor.push_to_hub(f'''openmmlab/{model_name}''')
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[f"upernet-convnext-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCamelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
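# Example invocation (the script filename is illustrative):
#   python convert_upernet_convnext_to_pytorch.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny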
| 333 | def get_bound(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    # parameter names restored from the body's own references; the original
    # function name is not recoverable from this snippet, so a descriptive
    # one is used here
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")
    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f'''guess the number : {last_numbers[-1]}''')
    print(f'''details : {last_numbers!s}''')
def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
    main()
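# Non-interactive example (skips the input() prompts in main()):
#   guess_the_number(10, 1000, 17)
#   # started...
#   # guess the number : 17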
| 333 | 1 |
from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : int , **lowercase_ : List[str] ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Any:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Any ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Dict , *lowercase_ : str , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ) -> List[str]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Any ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Tuple:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Dict:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> int:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[Any] = ["flax"]
def __init__( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : int ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Any , *lowercase_ : int , **lowercase_ : int ) -> Optional[int]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : str ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[int] = ["flax"]
def __init__( self : Any , *lowercase_ : str , **lowercase_ : Dict ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : int ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[str] = ["flax"]
def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
| 333 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class snake_case_ ( unittest.TestCase ):
    def setUp( self ):
lowercase__ : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowercase__ : List[Any] = self.diffusers_dir
shutil.copy(
            os.path.join(git_repo_path , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
    def tearDown( self ):
lowercase__ : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        code = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , "new_code.py" )
        with open(fname , "w" , newline="\n" ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , "r" ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers( self ):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
        self.assertEqual(code , REFERENCE_CODE )
    def test_is_copy_consistent( self ):
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , )
# Copy consistency with a really long name
lowercase__ : Optional[int] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
| 333 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
lowercase__ : Tuple = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
lowercase__ : Tuple = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
sd_pipe.set_scheduler("sample_euler" )
lowercase__ : Dict = "A painting of a squirrel eating a burger"
lowercase__ : Union[str, Any] = torch.manual_seed(0 )
lowercase__ : List[Any] = sd_pipe([prompt] , generator=lowercase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
lowercase__ : Dict = output.images
lowercase__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase__ : Any = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : str ) -> Dict:
lowercase__ : Dict = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowercase__ : Tuple = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
sd_pipe.set_scheduler("sample_euler" )
lowercase__ : Optional[Any] = "A painting of a squirrel eating a burger"
lowercase__ : Optional[Any] = torch.manual_seed(0 )
lowercase__ : str = sd_pipe([prompt] , generator=lowercase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
lowercase__ : List[Any] = output.images
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase__ : Union[str, Any] = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowercase__ : int = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
lowercase__ : Any = "A painting of a squirrel eating a burger"
lowercase__ : Any = torch.manual_seed(0 )
lowercase__ : List[Any] = sd_pipe(
[prompt] , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=lowercase_ , )
lowercase__ : Any = output.images
lowercase__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase__ : List[str] = np.array(
[0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
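# The @slow / @require_torch_gpu gates above mean these tests are skipped by
# default; a typical opt-in run (sketch, test path illustrative):
#   RUN_SLOW=1 python -m pytest tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py -q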
| 333 | from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad) is False
            ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad) is True
            ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output , target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset , batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters() , lr=1E-3)
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1E-3)
        sched = LambdaLR(opt , lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
# Test when on a single CPU or GPU that the context manager does nothing
lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))]
def test_distributed_sync(accelerator):
# Test on distributed setup that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
lowercase__ : int = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : str = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
lowercase__ : Dict = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : Any = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset , batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset , batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader , second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation(split_batch , dispatch_batches)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
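# The distributed branches above only trigger under a multi-process launch,
# e.g. (sketch):
#   accelerate launch --num_processes 2 test_sync.py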
| 333 | 1 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = False
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
UpperCamelCase = parser.parse_args()
UpperCamelCase = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
UpperCamelCase = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
UpperCamelCase = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
UpperCamelCase = reader.read()
UpperCamelCase = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
UpperCamelCase = UNetaDModel(**config)
else:
UpperCamelCase = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
UpperCamelCase = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
UpperCamelCase = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
UpperCamelCase = config[key]
del config[key]
UpperCamelCase = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
UpperCamelCase = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
UpperCamelCase = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
UpperCamelCase = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
UpperCamelCase = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
UpperCamelCase = param_value
UpperCamelCase = True
if not has_changed:
UpperCamelCase = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
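# Example invocation (script filename illustrative; assumes a local
# diffusers-format repo such as ./ddpm-cifar10-32):
#   python rename_unet_configs_and_weights.py --repo_path ./ddpm-cifar10-32 --dump_path ./converted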
| 333 | import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
        layer_name = f'''layers_{str(layer_index)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
        layer_name = f'''layers_{str(layer_index)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ : List[str] = tax_mlp_wi_a
lowercase__ : List[Any] = tax_mlp_wi_a
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
        lowercase__ : Tuple = tax_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    lowercase__ : List[Any] = tax_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
    lowercase__ : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase)
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 333 | 1 |
UpperCamelCase = 8.314_462 # universal gas constant R, in J mol^-1 K^-1
def lowercase_ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float):
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value.")
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def lowercase_ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float):
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value.")
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
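# Minimal self-contained sketch of the same rearrangement of the ideal gas
# law PV = nRT (illustrative only; the helpers above are obfuscated, so
# P = nRT / V is re-implemented inline). One mole at 273.15 K in 0.0224 m^3
# should give roughly one atmosphere.
_R = 8.314_462  # J mol^-1 K^-1, mirrors UNIVERSAL_GAS_CONSTANT above
_pressure = 1.0 * 273.15 * _R / 0.0224
assert 1.00e5 < _pressure < 1.02e5  # ~101 kPa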
| 333 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
__A : Optional[int] = "rwkv"
__A : List[str] = {"max_position_embeddings": "context_length"}
def __init__( self : Dict , lowercase_ : List[Any]=5_02_77 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=40_96 , lowercase_ : int=32 , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Any=1E-5 , lowercase_ : Optional[Any]=0 , lowercase_ : Any=0 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=False , lowercase_ : int=True , **lowercase_ : List[str] , ) -> int:
lowercase__ : List[str] = vocab_size
lowercase__ : str = context_length
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase__ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowercase__ : List[Any] = layer_norm_epsilon
lowercase__ : str = rescale_every
lowercase__ : Optional[int] = use_cache
lowercase__ : int = bos_token_id
lowercase__ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
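# Minimal sketch of the fallback defaults implemented above (illustrative;
# it mirrors the `... if ... is not None else ...` logic, since the class and
# its attribute names are obfuscated here). The upstream class name
# `RwkvConfig` is an assumption, not shown in this file.
_hidden_size = 768
_attention_hidden_size = None
_intermediate_size = None
_attention_hidden_size = _attention_hidden_size if _attention_hidden_size is not None else _hidden_size
_intermediate_size = _intermediate_size if _intermediate_size is not None else 4 * _hidden_size
assert (_attention_hidden_size, _intermediate_size) == (768, 3072)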
| 333 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCamelCase = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
UpperCamelCase = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase_ ( ):
lowercase__ : int = (
list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
)
lowercase__ : str = bs[:]
lowercase__ : List[str] = 0
for b in range(2**8):
if b not in bs:
bs.append(_lowerCamelCase)
cs.append(2**8 + n)
n += 1
lowercase__ : int = [chr(_lowerCamelCase) for n in cs]
return dict(zip(_lowerCamelCase , _lowerCamelCase))
def lowercase_ ( _lowerCamelCase : Dict):
lowercase__ : Optional[Any] = set()
lowercase__ : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
lowercase__ : Tuple = char
return pairs
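# Self-contained check of the pair extraction above (illustrative; the helper
# itself is obfuscated, so the adjacent-pair logic is re-implemented inline):
_word = "hello"
_pairs = {(_word[i], _word[i + 1]) for i in range(len(_word) - 1)}
assert _pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}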
class snake_case_ ( __A ):
__A : int = VOCAB_FILES_NAMES
__A : int = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : int = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : int="replace" , lowercase_ : Optional[int]="<s>" , lowercase_ : Optional[Any]="</s>" , lowercase_ : int="</s>" , lowercase_ : str="<s>" , lowercase_ : int="<unk>" , lowercase_ : int="<pad>" , lowercase_ : Union[str, Any]="<mask>" , lowercase_ : List[str]=False , **lowercase_ : List[str] , ) -> int:
lowercase__ : List[Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else bos_token
lowercase__ : Tuple = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else eos_token
lowercase__ : int = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else sep_token
lowercase__ : List[Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else cls_token
lowercase__ : Dict = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else unk_token
lowercase__ : List[str] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else pad_token
        # The mask token behaves like a normal word, i.e. it includes the space before it
lowercase__ : List[str] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
with open(lowercase_ , encoding="utf-8" ) as vocab_handle:
lowercase__ : List[str] = json.load(lowercase_ )
lowercase__ : Optional[int] = {v: k for k, v in self.encoder.items()}
lowercase__ : Optional[int] = errors # how to handle errors in decoding
lowercase__ : str = bytes_to_unicode()
lowercase__ : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowercase_ , encoding="utf-8" ) as merges_handle:
lowercase__ : Any = merges_handle.read().split("\n" )[1:-1]
lowercase__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowercase__ : List[str] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowercase__ : int = {}
lowercase__ : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__ : Tuple = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
return len(self.encoder )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self : Dict , lowercase_ : Dict ) -> List[str]:
if token in self.cache:
return self.cache[token]
lowercase__ : Union[str, Any] = tuple(lowercase_ )
lowercase__ : List[Any] = get_pairs(lowercase_ )
if not pairs:
return token
while True:
lowercase__ : Any = min(lowercase_ , key=lambda lowercase_ : self.bpe_ranks.get(lowercase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ , lowercase__ : Union[str, Any] = bigram
lowercase__ : Optional[Any] = []
lowercase__ : str = 0
while i < len(lowercase_ ):
try:
lowercase__ : str = word.index(lowercase_ , lowercase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__ : Any = j
if word[i] == first and i < len(lowercase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ : List[Any] = tuple(lowercase_ )
lowercase__ : str = new_word
if len(lowercase_ ) == 1:
break
else:
lowercase__ : int = get_pairs(lowercase_ )
lowercase__ : List[str] = " ".join(lowercase_ )
lowercase__ : Tuple = word
return word
def __UpperCamelCase ( self : str , lowercase_ : Optional[int] ) -> List[Any]:
lowercase__ : Optional[Any] = []
for token in re.findall(self.pat , lowercase_ ):
lowercase__ : Optional[Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowercase_ ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[Any] ) -> Optional[int]:
return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Dict , lowercase_ : List[str] ) -> Union[str, Any]:
return self.decoder.get(lowercase_ )
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]:
lowercase__ : Dict = "".join(lowercase_ )
lowercase__ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ : Optional[Any] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Optional[int] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowercase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" )
lowercase__ : Optional[Any] = 0
with open(lowercase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowercase_ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowercase__ : str = token_index
writer.write(" ".join(lowercase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
lowercase__ : List[Any] = [self.sep_token_id]
lowercase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : str=False , **lowercase_ : Union[str, Any] ) -> Union[str, Any]:
lowercase__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowercase_ ) > 0 and not text[0].isspace()):
lowercase__ : Optional[Any] = " " + text
return (text, kwargs)
def __UpperCamelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> str:
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : List[str] , lowercase_ : "Conversation" ) -> List[int]:
lowercase__ : Optional[int] = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix a space, as is done within Blenderbot
inputs.append(" " + text )
else:
                # Generated responses already contain the space prefix.
inputs.append(lowercase_ )
lowercase__ : Optional[Any] = " ".join(lowercase_ )
lowercase__ : int = self.encode(lowercase_ )
if len(lowercase_ ) > self.model_max_length:
lowercase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
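# A minimal, self-contained sketch of one BPE merge step, mirroring the loop
# in the `bpe` method above: the lowest-ranked adjacent pair is fused first.
# The toy word and the rank table below are illustrative only.
def _bpe_step(word: tuple, ranks: dict) -> tuple:
    pairs = [(word[i], word[i + 1]) for i in range(len(word) - 1)]
    best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
    if best not in ranks:
        return word  # no known merge applies
    first, second = best
    out, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            out.append(first + second)
            i += 2
        else:
            out.append(word[i])
            i += 1
    return tuple(out)

assert _bpe_step(("h", "e", "l", "l", "o"), {("l", "l"): 0}) == ("h", "e", "ll", "o")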
| 333 | class snake_case_ :
def __init__( self : int ) -> Optional[int]:
lowercase__ : Optional[int] = 0
lowercase__ : List[str] = 0
lowercase__ : Any = {}
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]:
if vertex not in self.adjacency:
lowercase__ : List[Any] = {}
self.num_vertices += 1
def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]:
self.add_vertex(lowercase_ )
self.add_vertex(lowercase_ )
if head == tail:
return
lowercase__ : int = weight
lowercase__ : Any = weight
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : List[Any] = self.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : int = edge
edges.remove((tail, head, weight) )
for i in range(len(lowercase_ ) ):
lowercase__ : Tuple = list(edges[i] )
edges.sort(key=lambda lowercase_ : e[2] )
for i in range(len(lowercase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowercase__ : int = edges[i][2] + 1
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge
lowercase__ : Union[str, Any] = weight
lowercase__ : Dict = weight
def __str__( self : str ) -> Any:
lowercase__ : str = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase__ : Optional[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Any = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : List[str] ) -> Dict:
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]:
lowercase__ : Any = Graph()
if vertices is None:
lowercase__ : str = []
if edges is None:
lowercase__ : List[Any] = []
for vertex in vertices:
g.add_vertex(lowercase_ )
for edge in edges:
g.add_edge(*lowercase_ )
return g
class snake_case_ :
def __init__( self : int ) -> List[str]:
lowercase__ : Dict = {}
lowercase__ : Tuple = {}
def __len__( self : Union[str, Any] ) -> Union[str, Any]:
return len(self.parent )
def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple:
if item in self.parent:
return self.find(lowercase_ )
lowercase__ : Union[str, Any] = item
lowercase__ : int = 0
return item
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any:
if item not in self.parent:
return self.make_set(lowercase_ )
if item != self.parent[item]:
lowercase__ : Union[str, Any] = self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]:
lowercase__ : Dict = self.find(lowercase_ )
lowercase__ : Optional[int] = self.find(lowercase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowercase__ : Dict = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowercase__ : int = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowercase__ : Tuple = roota
return roota
return None
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]:
lowercase__ : List[Any] = graph.num_vertices
lowercase__ : Optional[Any] = Graph.UnionFind()
lowercase__ : int = []
while num_components > 1:
lowercase__ : List[Any] = {}
for vertex in graph.get_vertices():
lowercase__ : Any = -1
lowercase__ : List[str] = graph.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : List[str] = edge
lowercase__ : List[str] = union_find.find(lowercase_ )
lowercase__ : Union[str, Any] = union_find.find(lowercase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : int = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : Dict = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex]
if union_find.find(lowercase_ ) != union_find.find(lowercase_ ):
union_find.union(lowercase_ , lowercase_ )
mst_edges.append(cheap_edge[vertex] )
lowercase__ : Optional[Any] = num_components - 1
lowercase__ : List[Any] = Graph.build(edges=lowercase_ )
return mst
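# Self-contained sketch of the same Boruvka idea on a plain edge list
# (illustrative; the Graph/UnionFind classes above are obfuscated, so the
# loop is re-implemented with a minimal union-find). It assumes a connected
# graph with distinct weights, matching the weight-adjustment step above.
def _boruvka(n: int, edges: list) -> list:
    parent = list(range(n))

    def find(x):
        # find with path halving
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    mst, components = [], n
    while components > 1:
        # cheapest outgoing edge per current component
        cheapest = {}
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for r in (ru, rv):
                    if r not in cheapest or cheapest[r][2] > w:
                        cheapest[r] = (u, v, w)
        for u, v, w in set(cheapest.values()):
            ru, rv = find(u), find(v)
            if ru != rv:
                parent[ru] = rv
                mst.append((u, v, w))
                components -= 1
    return mst

# 4-cycle with weights 1..4: the MST drops the heaviest edge, total weight 6.
assert sum(w for _, _, w in _boruvka(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)])) == 6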
| 333 | 1 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : str):
lowercase__ : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase)
lowercase__ : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase)
lowercase__ : List[str] = checkpoints.load_tax_checkpoint(_lowerCamelCase)
lowercase__ : Dict = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
lowercase__ : str = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
lowercase__ : Dict = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ : List[str] = tax_mlp_wi_a
lowercase__ : List[Any] = tax_mlp_wi_a
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
        lowercase__ : Tuple = tax_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    lowercase__ : List[Any] = tax_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
    lowercase__ : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase)
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 333 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = DPTConfig()
if "large" in checkpoint_url:
lowercase__ : str = 1024
lowercase__ : List[str] = 4096
lowercase__ : List[Any] = 24
lowercase__ : Dict = 16
lowercase__ : Union[str, Any] = [5, 11, 17, 23]
lowercase__ : Any = [256, 512, 1024, 1024]
lowercase__ : Optional[int] = (1, 384, 384)
if "ade" in checkpoint_url:
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = 150
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : str = "ade20k-id2label.json"
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r"))
lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = [1, 150, 480, 480]
return config, expected_shape
def lowercase_ ( _lowerCamelCase : List[Any]):
lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder")
if "pretrained.model" in name:
lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings")
if "patch_embed" in name:
lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings")
if "pos_embed" in name:
lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings")
if "attn.proj" in name:
lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense")
if "proj" in name and "project" not in name:
lowercase__ : int = name.replace("proj" , "projection")
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layer")
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense")
if "mlp.fc2" in name:
lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense")
if "norm1" in name:
lowercase__ : List[str] = name.replace("norm1" , "layernorm_before")
if "norm2" in name:
lowercase__ : Dict = name.replace("norm2" , "layernorm_after")
if "scratch.output_conv" in name:
lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head")
if "scratch" in name:
lowercase__ : str = name.replace("scratch" , "neck")
if "layer1_rn" in name:
lowercase__ : int = name.replace("layer1_rn" , "convs.0")
if "layer2_rn" in name:
lowercase__ : int = name.replace("layer2_rn" , "convs.1")
if "layer3_rn" in name:
lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2")
if "layer4_rn" in name:
lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3")
if "refinenet" in name:
lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
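        # e.g. abs(1 - 4) = 3 and abs(4 - 4) = 0, so refinenet4 ends up at fusion_stage.layers.0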
lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''')
if "out_conv" in name:
lowercase__ : str = name.replace("out_conv" , "projection")
if "resConfUnit1" in name:
lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1")
if "resConfUnit2" in name:
lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2")
if "conv1" in name:
lowercase__ : List[Any] = name.replace("conv1" , "convolution1")
if "conv2" in name:
lowercase__ : Tuple = name.replace("conv2" , "convolution2")
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0")
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0")
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0")
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0")
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection")
if "pretrained.act_postprocess1.4" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize")
if "pretrained.act_postprocess2.3" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection")
if "pretrained.act_postprocess2.4" in name:
lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize")
if "pretrained.act_postprocess3.3" in name:
lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection")
if "pretrained.act_postprocess4.3" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection")
if "pretrained.act_postprocess4.4" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize")
if "pretrained" in name:
lowercase__ : Any = name.replace("pretrained" , "dpt")
if "bn" in name:
lowercase__ : str = name.replace("bn" , "batch_norm")
if "head" in name:
lowercase__ : Optional[Any] = name.replace("head" , "head.head")
if "encoder.norm" in name:
lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm")
if "auxlayer" in name:
lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head")
return name
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str):
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size]
lowercase__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : int = in_proj_bias[-config.hidden_size :]
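# Minimal sketch of the slicing performed above, assuming the timm convention
# of a fused (3 * hidden_size, hidden_size) qkv weight; sizes are illustrative.
_h = 4
_qkv = torch.randn(3 * _h, _h)  # torch is imported at the top of this file
_q, _k, _v = _qkv[:_h, :], _qkv[_h : 2 * _h, :], _qkv[-_h:, :]
assert _q.shape == _k.shape == _v.shape == (_h, _h)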
def lowercase_ ( ):
lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict):
lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase)
# load original state_dict from URL
lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu")
# remove certain keys
remove_ignore_keys_(_lowerCamelCase)
# rename keys
for key in state_dict.copy().keys():
lowercase__ : List[str] = state_dict.pop(_lowerCamelCase)
lowercase__ : List[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase)
# load HuggingFace model
lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase)
model.load_state_dict(_lowerCamelCase)
model.eval()
# Check outputs on an image
lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384
lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase)
lowercase__ : List[str] = prepare_img()
lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt")
# forward pass
lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth
# Assert logits
lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
if "ade" in checkpoint_url:
lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
assert outputs.shape == torch.Size(_lowerCamelCase)
assert (
torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase)
)
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
print(f'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowerCamelCase)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowerCamelCase)
if push_to_hub:
print("Pushing model to hub...")
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
UpperCamelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 333 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def lowercase_ ( _lowerCamelCase : int=None):
if subparsers is not None:
lowercase__ : Dict = subparsers.add_parser("test")
else:
lowercase__ : List[str] = argparse.ArgumentParser("Accelerate test command")
parser.add_argument(
"--config_file" , default=_lowerCamelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=_lowerCamelCase)
return parser
def lowercase_ ( _lowerCamelCase : List[Any]):
lowercase__ : List[str] = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
if args.config_file is None:
lowercase__ : Union[str, Any] = script_name
else:
lowercase__ : Union[str, Any] = f'''--config_file={args.config_file} {script_name}'''
lowercase__ : List[str] = ["accelerate-launch"] + test_args.split()
lowercase__ : Optional[Any] = execute_subprocess_async(_lowerCamelCase , env=os.environ.copy())
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!")
def lowercase_ ( ):
lowercase__ : int = test_command_parser()
lowercase__ : Optional[int] = parser.parse_args()
test_command(_lowerCamelCase)
if __name__ == "__main__":
main()
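# Typical invocation through the accelerate CLI, which dispatches to the
# parser defined above (the config path below is illustrative):
#
#     accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml
#
# Without --config_file, the default location described in the help text is used.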
| 333 | def lowercase_ ( _lowerCamelCase : int = 1 , _lowerCamelCase : int = 1000):
lowercase__ : Union[str, Any] = 1
lowercase__ : int = 0
for divide_by_number in range(_lowerCamelCase , digit + 1):
lowercase__ : list[int] = []
lowercase__ : Dict = numerator
for _ in range(1 , digit + 1):
if now_divide in has_been_divided:
if longest_list_length < len(_lowerCamelCase):
lowercase__ : Union[str, Any] = len(_lowerCamelCase)
lowercase__ : Optional[int] = divide_by_number
else:
has_been_divided.append(_lowerCamelCase)
lowercase__ : Optional[Any] = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
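# Worked example (self-contained, since the helper above is obfuscated):
# among denominators 1..10, 1/7 = 0.(142857) has the longest recurring
# decimal cycle (6 digits), so the search should return 7. This sketch
# tracks long-division remainders until one repeats or the division ends.
def _longest_cycle_denominator(limit: int) -> int:
    best_len, best_d = 0, 1
    for d in range(1, limit + 1):
        seen, r = [], 1
        while r not in seen and r != 0:
            seen.append(r)
            r = r * 10 % d
        if r != 0 and len(seen) - seen.index(r) > best_len:
            best_len = len(seen) - seen.index(r)
            best_d = d
    return best_d

assert _longest_cycle_denominator(10) == 7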
| 333 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase = '''src/diffusers'''
UpperCamelCase = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase = spec.loader.load_module()
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any]):
return line.startswith(_lowerCamelCase) or len(_lowerCamelCase) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , _lowerCamelCase) is not None
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
lowercase__ : Tuple = object_name.split(".")
lowercase__ : int = 0
# First let's find the module where our object lives.
lowercase__ : List[Any] = parts[i]
while i < len(_lowerCamelCase) and not os.path.isfile(os.path.join(_lowerCamelCase , f'''{module}.py''')):
i += 1
if i < len(_lowerCamelCase):
lowercase__ : Optional[int] = os.path.join(_lowerCamelCase , parts[i])
if i >= len(_lowerCamelCase):
raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''')
with open(os.path.join(_lowerCamelCase , f'''{module}.py''') , "r" , encoding="utf-8" , newline="\n") as f:
lowercase__ : str = f.readlines()
# Now let's find the class / func in the code!
lowercase__ : Dict = ""
lowercase__ : Optional[int] = 0
for name in parts[i + 1 :]:
while (
line_index < len(_lowerCamelCase) and re.search(Rf'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index]) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(_lowerCamelCase):
raise ValueError(f''' {object_name} does not match any function or class in {module}.''')
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
lowercase__ : int = line_index
while line_index < len(_lowerCamelCase) and _should_continue(lines[line_index] , _lowerCamelCase):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
lowercase__ : str = lines[start_index:line_index]
return "".join(_lowerCamelCase)
UpperCamelCase = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
UpperCamelCase = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
UpperCamelCase = re.compile(R'''<FILL\s+[^>]*>''')
def lowercase_ ( _lowerCamelCase : Tuple):
lowercase__ : List[str] = code.split("\n")
lowercase__ : Any = 0
while idx < len(_lowerCamelCase) and len(lines[idx]) == 0:
idx += 1
if idx < len(_lowerCamelCase):
return re.search(R"^(\s*)\S" , lines[idx]).groups()[0]
return ""
def lowercase_ ( _lowerCamelCase : Tuple):
lowercase__ : Union[str, Any] = len(get_indent(_lowerCamelCase)) > 0
if has_indent:
lowercase__ : List[str] = f'''class Bla:\n{code}'''
lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=_lowerCamelCase)
lowercase__ : Tuple = black.format_str(_lowerCamelCase , mode=_lowerCamelCase)
lowercase__ , lowercase__ : Dict = style_docstrings_in_code(_lowerCamelCase)
return result[len("class Bla:\n") :] if has_indent else result
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any]=False):
with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n") as f:
lowercase__ : Optional[int] = f.readlines()
lowercase__ : Optional[Any] = []
lowercase__ : int = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(_lowerCamelCase):
lowercase__ : Dict = _re_copy_warning.search(lines[line_index])
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
lowercase__ , lowercase__ , lowercase__ : Any = search.groups()
lowercase__ : Dict = find_code_in_diffusers(_lowerCamelCase)
lowercase__ : Optional[Any] = get_indent(_lowerCamelCase)
lowercase__ : List[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
lowercase__ : List[str] = theoretical_indent
lowercase__ : Union[str, Any] = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
lowercase__ : str = True
while line_index < len(_lowerCamelCase) and should_continue:
line_index += 1
if line_index >= len(_lowerCamelCase):
break
lowercase__ : Dict = lines[line_index]
lowercase__ : List[str] = _should_continue(_lowerCamelCase , _lowerCamelCase) and re.search(f'''^{indent}# End copy''' , _lowerCamelCase) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
lowercase__ : Any = lines[start_index:line_index]
lowercase__ : List[Any] = "".join(_lowerCamelCase)
# Remove any nested `Copied from` comments to avoid circular copies
lowercase__ : List[Any] = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(_lowerCamelCase) is None]
lowercase__ : Optional[Any] = "\n".join(_lowerCamelCase)
# Before comparing, use the `replace_pattern` on the original code.
if len(_lowerCamelCase) > 0:
lowercase__ : Dict = replace_pattern.replace("with" , "").split(",")
lowercase__ : Any = [_re_replace_pattern.search(_lowerCamelCase) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
lowercase__ , lowercase__ , lowercase__ : int = pattern.groups()
lowercase__ : List[str] = re.sub(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
if option.strip() == "all-casing":
lowercase__ : Optional[Any] = re.sub(obja.lower() , obja.lower() , _lowerCamelCase)
lowercase__ : int = re.sub(obja.upper() , obja.upper() , _lowerCamelCase)
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
lowercase__ : Dict = blackify(lines[start_index - 1] + theoretical_code)
lowercase__ : Tuple = theoretical_code[len(lines[start_index - 1]) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index])
if overwrite:
lowercase__ : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
lowercase__ : Optional[int] = start_index + 1
if overwrite and len(_lowerCamelCase) > 0:
# Warn the user a file has been modified.
print(f'''Detected changes, rewriting {filename}.''')
with open(_lowerCamelCase , "w" , encoding="utf-8" , newline="\n") as f:
f.writelines(_lowerCamelCase)
return diffs
def lowercase_ ( _lowerCamelCase : bool = False):
lowercase__ : Optional[Any] = glob.glob(os.path.join(_lowerCamelCase , "**/*.py") , recursive=_lowerCamelCase)
lowercase__ : str = []
for filename in all_files:
lowercase__ : List[str] = is_copy_consistent(_lowerCamelCase , _lowerCamelCase)
diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(_lowerCamelCase) > 0:
lowercase__ : Tuple = "\n".join(_lowerCamelCase)
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCamelCase = parser.parse_args()
check_copies(args.fix_and_overwrite)
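# Quick self-contained check of the "Copied from" comment shape that the
# regexes above parse (the class path and replacement are illustrative):
import re as _re

_m = _re.search(
    R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)",
    "# Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock",
)
assert _m is not None and _m.groups()[1] == "models.attention.BasicTransformerBlock"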
| 333 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ):
__A : int = StableUnCLIPPipeline
__A : int = TEXT_TO_IMAGE_PARAMS
__A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__A : int = False
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : str = 32
lowercase__ : Any = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : Any = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase_ , num_layers=1 , )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowercase__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ )
lowercase__ : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , )
torch.manual_seed(0 )
lowercase__ : Any = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL()
lowercase__ : List[Any] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __UpperCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Dict=0 ) -> Any:
if str(lowercase_ ).startswith("mps" ):
lowercase__ : Any = torch.manual_seed(lowercase_ )
else:
lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase__ : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
lowercase__ : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
lowercase__ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowercase_ )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowercase__ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
        lowercase__ : Dict = pipe("anime turtle" , generator=lowercase_ , output_type="np" )
lowercase__ : Optional[int] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowercase__ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : str = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowercase__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 1 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase = 256
class snake_case_ ( __A ):
__A : str = ["melgan"]
def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
lowercase__ : Dict = math.log(1E-5 ) # Matches MelGAN training.
lowercase__ : Dict = 4.0 # Largest value for most examples
lowercase__ : str = 1_28
self.register_modules(
notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , )
def __UpperCamelCase ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]=(-1.0, 1.0) , lowercase_ : Optional[int]=False ) -> Any:
lowercase__ , lowercase__ : Dict = output_range
if clip:
lowercase__ : List[str] = torch.clip(lowercase_ , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase__ : int = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __UpperCamelCase ( self : str , lowercase_ : int , lowercase_ : Union[str, Any]=(-1.0, 1.0) , lowercase_ : Optional[Any]=False ) -> Dict:
lowercase__ , lowercase__ : Optional[Any] = input_range
lowercase__ : str = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs
# Scale to [0, 1].
lowercase__ : List[str] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
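    # Both scalers are plain min-max affine maps: `scale_features` sends
    # [min_value, max_value] onto `output_range`, and the method above inverts
    # it, sending `input_range` back onto [min_value, max_value]; with the
    # defaults that is [log(1e-5), 4.0] <-> [-1.0, 1.0].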
def __UpperCamelCase ( self : str , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : str ) -> List[Any]:
lowercase__ : List[str] = input_tokens > 0
lowercase__ , lowercase__ : Optional[Any] = self.notes_encoder(
encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ )
lowercase__ , lowercase__ : List[Any] = self.continuous_encoder(
encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __UpperCamelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : int ) -> int:
lowercase__ : Tuple = noise_time
if not torch.is_tensor(lowercase_ ):
lowercase__ : str = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
lowercase__ : Union[str, Any] = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ : List[Any] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase__ : Dict = self.decoder(
encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ )
return logits
@torch.no_grad()
def __call__( self : Union[str, Any] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(lowercase_ )}.''' )
lowercase__ : List[Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase__ : List[str] = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase__ : List[str] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
for i, encoder_input_tokens in enumerate(lowercase_ ):
if i == 0:
lowercase__ : str = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase__ : str = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase__ : Tuple = ones
lowercase__ : int = self.scale_features(
lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ )
lowercase__ : Union[str, Any] = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , )
            # Sample Gaussian noise with the shape of encoder_continuous_inputs to begin the denoising loop
lowercase__ : Union[str, Any] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(lowercase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : int = self.decode(
encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase__ : Any = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
lowercase__ : str = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] )
lowercase__ : List[str] = mel[:1]
lowercase__ : List[Any] = mel.cpu().float().numpy()
lowercase__ : Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ )
logger.info("Generated segment" , lowercase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase__ : int = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=lowercase_ )
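# A standalone sketch of the linear range mapping used by the scale /
# unscale helpers above; the function name and sample values here are
# illustrative only.
import torch

def rescale(x, in_range, out_range, clip=False):
    in_min, in_max = in_range
    out_min, out_max = out_range
    if clip:
        x = torch.clip(x, in_min, in_max)
    zero_one = (x - in_min) / (in_max - in_min)  # scale to [0, 1]
    return zero_one * (out_max - out_min) + out_min

features = torch.tensor([-11.51, -3.0, 4.0])  # roughly [log(1e-5), 4.0]
normed = rescale(features, (-11.51, 4.0), (-1.0, 1.0), clip=True)
restored = rescale(normed, (-1.0, 1.0), (-11.51, 4.0))
assert torch.allclose(features, restored, atol=1e-4)  # round trip is (nearly) lossless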
| 333 | import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int=False):
try:
lowercase__ : str = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase__ : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
lowercase__ : Union[str, Any] = strtobool(_lowerCamelCase)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''')
return _value
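# A self-contained sketch of the env-flag parsing above (the helper name
# is hypothetical; the behavior mirrors the function it restates).
import os
from distutils.util import strtobool

def parse_bool_env(key, default=False):
    value = os.environ.get(key)
    if value is None:
        return default  # key unset: fall back to the default
    try:
        return bool(strtobool(value))
    except ValueError:
        raise ValueError(f"If set, {key} must be yes or no.")

os.environ["MY_FLAG"] = "yes"
assert parse_bool_env("MY_FLAG") is True
assert parse_bool_env("SOME_UNSET_FLAG") is False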
UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skip("Test was skipped")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict):
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=None):
if test_case is None:
return partial(_lowerCamelCase , version=_lowerCamelCase)
return unittest.skipUnless(is_torch_version(">=" , _lowerCamelCase) , f'''test requires torch version >= {version}''')(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase)
UpperCamelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase)
class snake_case_ ( unittest.TestCase ):
__A : int = True
@classmethod
def __UpperCamelCase ( cls : str ) -> str:
lowercase__ : str = tempfile.mkdtemp()
@classmethod
def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCamelCase ( self : str ) -> Optional[int]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowercase_ )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str:
lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Tuple = AcceleratorState()
lowercase__ : Optional[int] = tensor[None].clone().to(state.device)
lowercase__ : Optional[int] = gather(_lowerCamelCase).cpu()
lowercase__ : Optional[Any] = tensor[0].cpu()
for i in range(tensors.shape[0]):
if not torch.equal(tensors[i] , _lowerCamelCase):
return False
return True
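# A single-process illustration of the all-ranks-equal check above:
# stack per-rank tensors and verify each row matches the first
# (hypothetical data standing in for the gathered result).
import torch

gathered = torch.stack([torch.arange(4.0) for _ in range(3)])  # 3 mock "ranks"
reference = gathered[0]
assert all(torch.equal(gathered[i], reference) for i in range(gathered.shape[0]))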
class snake_case_ :
def __init__( self : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int ) -> Union[str, Any]:
lowercase__ : int = returncode
lowercase__ : Dict = stdout
lowercase__ : List[Any] = stderr
async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str):
while True:
lowercase__ : int = await stream.readline()
if line:
callback(_lowerCamelCase)
else:
break
async def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=False):
if echo:
print("\nRunning: " , " ".join(_lowerCamelCase))
lowercase__ : str = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase__ : Tuple = []
lowercase__ : List[Any] = []
def tee(_lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=""):
lowercase__ : Optional[int] = line.decode("utf-8").rstrip()
sink.append(_lowerCamelCase)
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:"))),
asyncio.create_task(_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:"))),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=180 , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=True):
lowercase__ : Optional[Any] = asyncio.get_event_loop()
lowercase__ : List[Any] = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase))
lowercase__ : str = " ".join(_lowerCamelCase)
if result.returncode > 0:
lowercase__ : Dict = "\n".join(result.stderr)
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''')
return result
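# A simplified synchronous sketch of the wrapper above: run a command,
# capture its output, and raise with stderr on a nonzero return code.
# The original streams stdout/stderr live via asyncio; subprocess.run is
# used here for brevity.
import subprocess
import sys

def run_and_check(cmd, timeout=180):
    result = subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
    if result.returncode != 0:
        raise RuntimeError(f"'{' '.join(cmd)}' failed with returncode {result.returncode}:\n{result.stderr}")
    return result

out = run_and_check([sys.executable, "-c", "print('ok')"])
assert out.stdout.strip() == "ok"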
class snake_case_ ( __A ):
pass
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any=False):
try:
lowercase__ : Optional[int] = subprocess.check_output(_lowerCamelCase , stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(_lowerCamelCase , "decode"):
lowercase__ : Optional[Any] = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{" ".join(_lowerCamelCase)}` failed with the following error:\n\n{e.output.decode()}''') from e
| 333 | 1 |
def lowercase_ ( _lowerCamelCase : int):
if not isinstance(_lowerCamelCase , _lowerCamelCase):
lowercase__ : Dict = f'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowerCamelCase)
if number < 0:
return False
lowercase__ : int = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
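# Spot checks for the automorphic-number test above (n is automorphic
# when n**2 ends in the digits of n), restated in a self-contained form;
# `is_automorphic` is an illustrative name.
def is_automorphic(n: int) -> bool:
    if n < 0:
        return False
    square = n * n
    while n > 0:
        if n % 10 != square % 10:
            return False
        n //= 10
        square //= 10
    return True

assert is_automorphic(5)      # 5**2 = 25 ends in 5
assert is_automorphic(76)     # 76**2 = 5776 ends in 76
assert not is_automorphic(7)  # 7**2 = 49 does not end in 7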
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : int , **lowercase_ : List[str] ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Any:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Any ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Dict , *lowercase_ : str , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ) -> List[str]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Any ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Tuple:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Dict:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> int:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[Any] = ["flax"]
def __init__( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : int ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Any , *lowercase_ : int , **lowercase_ : int ) -> Optional[int]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : str ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[int] = ["flax"]
def __init__( self : Any , *lowercase_ : str , **lowercase_ : Dict ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : int ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[str] = ["flax"]
def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
| 333 | 1 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
lowercase__ : int = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
model.to(lowercase_ )
from datasets import load_dataset
lowercase__ : str = load_dataset("nielsr/rvlcdip-demo" )
lowercase__ : int = dataset["train"][0]["image"].convert("RGB" )
lowercase__ : Tuple = image_processor(lowercase_ , return_tensors="pt" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**lowercase_ )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape , lowercase_ )
lowercase__ : Union[str, Any] = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=lowercase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) )
| 333 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case_ ( __A ):
__A : List[str] = "vit_mae"
def __init__( self : List[Any] , lowercase_ : List[Any]=7_68 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Optional[Any]=30_72 , lowercase_ : str="gelu" , lowercase_ : Tuple=0.0 , lowercase_ : int=0.0 , lowercase_ : Dict=0.02 , lowercase_ : int=1E-12 , lowercase_ : Tuple=2_24 , lowercase_ : Any=16 , lowercase_ : Dict=3 , lowercase_ : List[Any]=True , lowercase_ : Dict=16 , lowercase_ : List[str]=5_12 , lowercase_ : Tuple=8 , lowercase_ : Any=20_48 , lowercase_ : int=0.75 , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ) -> Optional[Any]:
super().__init__(**lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : str = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : List[Any] = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Any = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[int] = patch_size
lowercase__ : Any = num_channels
lowercase__ : str = qkv_bias
lowercase__ : Optional[Any] = decoder_num_attention_heads
lowercase__ : Any = decoder_hidden_size
lowercase__ : Any = decoder_num_hidden_layers
lowercase__ : Union[str, Any] = decoder_intermediate_size
lowercase__ : int = mask_ratio
lowercase__ : Tuple = norm_pix_loss
| 333 | 1 |
UpperCamelCase = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
UpperCamelCase = ['''a''', '''b''', '''c''', '''d''', '''e''']
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple):
lowercase__ : Any = start
# add current to visited
visited.append(_lowerCamelCase)
lowercase__ : List[Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
lowercase__ : Optional[Any] = topological_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# if all neighbors visited add current to sort
sort.append(_lowerCamelCase)
# if all vertices haven't been visited select a new one to visit
if len(_lowerCamelCase) != len(_lowerCamelCase):
for vertice in vertices:
if vertice not in visited:
lowercase__ : Optional[Any] = topological_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# return sort
return sort
if __name__ == "__main__":
UpperCamelCase = topological_sort('''a''', [], [])
    print(UpperCamelCase)
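    # The DFS appends each vertex after its children, so the printed value
    # is expected to be ['c', 'd', 'e', 'b', 'a'], a reverse topological
    # order; reversing it yields the topological order ['a', 'b', 'e', 'd', 'c'].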
| 333 | def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
while a != 0:
lowercase__ , lowercase__ : Dict = b % a, a
return b
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
if gcd(_lowerCamelCase , _lowerCamelCase) != 1:
lowercase__ : Tuple = f'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(_lowerCamelCase)
lowercase__ , lowercase__ , lowercase__ : Optional[int] = 1, 0, a
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = 0, 1, m
while va != 0:
lowercase__ : Tuple = ua // va
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 333 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
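# A minimal sketch of the lazy-module pattern used above: attribute
# access triggers the submodule import instead of paying for it at
# package import time (illustrative only; the real _LazyModule also
# handles __dir__, pickling, and module specs).
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")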
| 333 | import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Tuple=100 , _lowerCamelCase : Tuple=" "):
lowercase__ : Union[str, Any] = text.split(_lowerCamelCase)
return [character.join(text[i : i + n]).strip() for i in range(0 , len(_lowerCamelCase) , _lowerCamelCase)]
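# A self-contained restatement of the word-window splitter above
# (chunks of n words joined by `character`); sample values are illustrative.
def split_text(text: str, n: int = 100, character: str = " "):
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]

assert split_text("a b c d e", n=3) == ["a b c", "d e"]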
def lowercase_ ( _lowerCamelCase : dict):
lowercase__ , lowercase__ : List[str] = [], []
for title, text in zip(documents["title"] , documents["text"]):
if text is not None:
for passage in split_text(_lowerCamelCase):
titles.append(title if title is not None else "")
texts.append(_lowerCamelCase)
return {"title": titles, "text": texts}
def lowercase_ ( _lowerCamelCase : dict , _lowerCamelCase : DPRContextEncoder , _lowerCamelCase : DPRContextEncoderTokenizerFast):
lowercase__ : Union[str, Any] = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=_lowerCamelCase , padding="longest" , return_tensors="pt")["input_ids"]
lowercase__ : Any = ctx_encoder(input_ids.to(device=_lowerCamelCase) , return_dict=_lowerCamelCase).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
######################################
logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase__ : str = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase__ : List[Any] = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase__ : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=_lowerCamelCase)
lowercase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase__ : List[Any] = Features(
{"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}) # optional, save as float32 instead of float64 to save space
lowercase__ : List[Any] = dataset.map(
partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , )
# And finally save your dataset
lowercase__ : Optional[int] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset")
dataset.save_to_disk(_lowerCamelCase)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index("embeddings" , custom_index=_lowerCamelCase)
# And save the index
lowercase__ : Union[str, Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss")
dataset.get_index("embeddings").save(_lowerCamelCase)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
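# A minimal sketch of the Faiss HNSW indexing above on random vectors
# (d=768 matches the DPR embedding size, m=128 the HNSW link count).
import faiss
import numpy as np

d, m = 768, 128
index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
vectors = np.random.rand(1000, d).astype(np.float32)
index.add(vectors)
scores, ids = index.search(vectors[:1], 5)  # top-5 inner-product neighbors
assert ids.shape == (1, 5)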
@dataclass
class snake_case_ :
__A : str = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,)
__A : Optional[str] = field(
default=__A ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,)
__A : str = field(
default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,)
__A : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} ,)
__A : Optional[str] = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,)
@dataclass
class snake_case_ :
__A : Optional[int] = field(
default=__A ,metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} ,)
__A : int = field(
default=16 ,metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} ,)
@dataclass
class snake_case_ :
__A : int = field(
default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,)
__A : int = field(
default=128 ,metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 333 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(__A )
class snake_case_ ( __A ):
def __init__( self : List[Any] , **lowercase_ : Union[str, Any] ) -> Optional[int]:
super().__init__(**lowercase_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Dict , lowercase_ : Union[str, List[str], "Image", List["Image"]] , **lowercase_ : Any ) -> Optional[int]:
return super().__call__(lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Dict , **lowercase_ : int ) -> Optional[int]:
lowercase__ : List[Any] = {}
if "candidate_labels" in kwargs:
lowercase__ : Optional[Any] = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
lowercase__ : Optional[Any] = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def __UpperCamelCase ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Any="This is a photo of {}." ) -> List[str]:
lowercase__ : Tuple = load_image(lowercase_ )
lowercase__ : Union[str, Any] = self.image_processor(images=[image] , return_tensors=self.framework )
lowercase__ : Union[str, Any] = candidate_labels
lowercase__ : List[str] = [hypothesis_template.format(lowercase_ ) for x in candidate_labels]
lowercase__ : List[Any] = self.tokenizer(lowercase_ , return_tensors=self.framework , padding=lowercase_ )
lowercase__ : Optional[int] = [text_inputs]
return inputs
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : str ) -> Tuple:
lowercase__ : int = model_inputs.pop("candidate_labels" )
lowercase__ : Dict = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , lowercase_ ):
lowercase__ : Any = text_inputs[0]
else:
# Batching case.
lowercase__ : int = text_inputs[0][0]
lowercase__ : Union[str, Any] = self.model(**lowercase_ , **lowercase_ )
lowercase__ : List[Any] = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def __UpperCamelCase ( self : List[str] , lowercase_ : Union[str, Any] ) -> Tuple:
lowercase__ : Optional[Any] = model_outputs.pop("candidate_labels" )
lowercase__ : Dict = model_outputs["logits"][0]
if self.framework == "pt":
lowercase__ : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 )
lowercase__ : Optional[Any] = probs.tolist()
if not isinstance(lowercase_ , lowercase_ ):
lowercase__ : List[str] = [scores]
elif self.framework == "tf":
lowercase__ : str = stable_softmax(lowercase_ , axis=-1 )
lowercase__ : Union[str, Any] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
lowercase__ : Union[str, Any] = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(lowercase_ , lowercase_ ) , key=lambda x : -x[0] )
]
return result
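# The postprocessing above reduces to a softmax over the image-text
# logits followed by a sort; a minimal numpy restatement with made-up
# logits and labels:
import numpy as np

logits = np.array([2.0, 0.5, -1.0])
labels = ["cat", "dog", "car"]
probs = np.exp(logits - logits.max())  # numerically stable softmax
probs /= probs.sum()
ranked = sorted(zip(probs.tolist(), labels), reverse=True)
assert ranked[0][1] == "cat"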
| 333 | import argparse
import datetime
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = {
"0": "Sunday",
"1": "Monday",
"2": "Tuesday",
"3": "Wednesday",
"4": "Thursday",
"5": "Friday",
"6": "Saturday",
}
lowercase__ : Any = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(_lowerCamelCase) < 11:
raise ValueError("Must be 10 characters long")
# Get month
lowercase__ : int = int(date_input[0] + date_input[1])
# Validate
if not 0 < m < 13:
raise ValueError("Month must be between 1 - 12")
lowercase__ : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get day
lowercase__ : int = int(date_input[3] + date_input[4])
# Validate
if not 0 < d < 32:
raise ValueError("Date must be between 1 - 31")
# Get second separator
lowercase__ : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get year
lowercase__ : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"Year out of range. There has to be some sort of limit...right?")
# Get datetime obj for validation
lowercase__ : Union[str, Any] = datetime.date(int(_lowerCamelCase) , int(_lowerCamelCase) , int(_lowerCamelCase))
# Start math
if m <= 2:
lowercase__ : Optional[Any] = y - 1
lowercase__ : int = m + 12
    # Zeller's congruence terms
lowercase__ : int = int(str(_lowerCamelCase)[:2])
lowercase__ : int = int(str(_lowerCamelCase)[2:])
lowercase__ : int = int(2.6 * m - 5.39)
lowercase__ : int = int(c / 4)
lowercase__ : int = int(k / 4)
lowercase__ : int = int(d + k)
lowercase__ : int = int(t + u + v + x)
lowercase__ : int = int(z - (2 * c))
lowercase__ : int = round(w % 7)
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("The date was evaluated incorrectly. Contact developer.")
# Response
    lowercase__ : str = f'''Your date {date_input}, is a {days[str(f)]}!'''
return response
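# A worked instance of Zeller's congruence as implemented above, for
# 01-01-2000 (January is treated as month 13 of the previous year):
m, y, day = 13, 1999, 1
c, k = y // 100, y % 100                       # century and year-of-century
t = int(2.6 * m - 5.39)                        # month term: int(28.41) = 28
f = (t + c // 4 + k // 4 + day + k - 2 * c) % 7
assert f == 6                                  # index 6 -> "Saturday"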
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
UpperCamelCase = parser.parse_args()
zeller(args.date_input)
| 333 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowercase_ ( _lowerCamelCase : Any):
lowercase__ , lowercase__ : Optional[Any] = image.size
lowercase__ , lowercase__ : int = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
lowercase__ : int = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"])
lowercase__ : Union[str, Any] = np.array(_lowerCamelCase).astype(np.floataa) / 255.0
lowercase__ : Union[str, Any] = image[None].transpose(0 , 3 , 1 , 2)
lowercase__ : Dict = torch.from_numpy(_lowerCamelCase)
return 2.0 * image - 1.0
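# A quick check of the [-1, 1] normalization above: pixel values in
# [0, 255] map to [-1.0, 1.0] after dividing by 255 and applying 2x - 1.
import numpy as np

arr = np.array([0.0, 127.5, 255.0], dtype=np.float32) / 255.0
assert np.allclose(2.0 * arr - 1.0, [-1.0, 0.0, 1.0])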
class snake_case_ ( __A ):
def __init__( self : str , lowercase_ : VQModel , lowercase_ : UNetaDModel , lowercase_ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> List[Any]:
super().__init__()
self.register_modules(vqvae=lowercase_ , unet=lowercase_ , scheduler=lowercase_ )
@torch.no_grad()
def __call__( self : Tuple , lowercase_ : Union[torch.Tensor, PIL.Image.Image] = None , lowercase_ : Optional[int] = 1 , lowercase_ : Optional[int] = 1_00 , lowercase_ : Optional[float] = 0.0 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(lowercase_ , PIL.Image.Image ):
lowercase__ : Any = 1
elif isinstance(lowercase_ , torch.Tensor ):
lowercase__ : List[Any] = image.shape[0]
else:
raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowercase_ )}''' )
if isinstance(lowercase_ , PIL.Image.Image ):
lowercase__ : str = preprocess(lowercase_ )
lowercase__ , lowercase__ : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowercase__ : Tuple = (batch_size, self.unet.config.in_channels // 2, height, width)
lowercase__ : Any = next(self.unet.parameters() ).dtype
lowercase__ : Optional[int] = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
lowercase__ : Tuple = image.to(device=self.device , dtype=lowercase_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowercase_ , device=self.device )
lowercase__ : Optional[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ : List[str] = {}
if accepts_eta:
lowercase__ : int = eta
for t in self.progress_bar(lowercase_ ):
# concat latents and low resolution image in the channel dimension.
lowercase__ : Optional[int] = torch.cat([latents, image] , dim=1 )
lowercase__ : Optional[int] = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
# predict the noise residual
lowercase__ : Tuple = self.unet(lowercase_ , lowercase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : Optional[int] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
# decode the image latents with the VQVAE
lowercase__ : Union[str, Any] = self.vqvae.decode(lowercase_ ).sample
lowercase__ : str = torch.clamp(lowercase_ , -1.0 , 1.0 )
lowercase__ : Any = image / 2 + 0.5
lowercase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : Optional[Any] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
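# The `accepts_eta` check above inspects the scheduler's step() signature
# so one loop can drive schedulers with and without an eta parameter;
# a tiny restatement with a hypothetical step function:
import inspect

def extra_step_kwargs(step_fn, eta=0.0):
    accepts_eta = "eta" in inspect.signature(step_fn).parameters
    return {"eta": eta} if accepts_eta else {}

def ddim_like_step(model_output, timestep, sample, eta=0.0):  # hypothetical
    return sample

assert extra_step_kwargs(ddim_like_step, eta=0.1) == {"eta": 0.1}
assert extra_step_kwargs(lambda m, t, s: s) == {}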
| 333 | import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
UpperCamelCase = 4
UpperCamelCase = 3
class snake_case_ ( __A ):
pass
def lowercase_ ( _lowerCamelCase : List[str]):
for shard in shards:
for i in range(_lowerCamelCase):
yield {"i": i, "shard": shard}
def lowercase_ ( ):
lowercase__ : List[str] = int(os.environ["RANK"])
lowercase__ : Union[str, Any] = int(os.environ["WORLD_SIZE"])
lowercase__ : Union[str, Any] = ArgumentParser()
parser.add_argument("--streaming" , type=_lowerCamelCase)
parser.add_argument("--local_rank" , type=_lowerCamelCase)
parser.add_argument("--num_workers" , type=_lowerCamelCase , default=0)
lowercase__ : int = parser.parse_args()
lowercase__ : Union[str, Any] = args.streaming
lowercase__ : List[Any] = args.num_workers
lowercase__ : Dict = {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(_lowerCamelCase)]}
lowercase__ : int = IterableDataset.from_generator(_lowerCamelCase , gen_kwargs=_lowerCamelCase)
if not streaming:
lowercase__ : str = Dataset.from_list(list(_lowerCamelCase))
lowercase__ : List[str] = split_dataset_by_node(_lowerCamelCase , rank=_lowerCamelCase , world_size=_lowerCamelCase)
lowercase__ : Any = torch.utils.data.DataLoader(_lowerCamelCase , num_workers=_lowerCamelCase)
lowercase__ : Dict = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase__ : Any = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
lowercase__ : List[str] = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''')
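# The expected per-rank size logic above, factored out and spot-checked:
# each rank gets full_size // world_size rows, and ranks below
# full_size % world_size receive one extra row.
def expected_local_size(full_size: int, world_size: int, rank: int) -> int:
    return full_size // world_size + int(rank < full_size % world_size)

assert [expected_local_size(12, 3, r) for r in range(3)] == [4, 4, 4]
assert [expected_local_size(12, 5, r) for r in range(5)] == [3, 3, 2, 2, 2]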
if __name__ == "__main__":
main()
| 333 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class snake_case_ ( __A ):
__A : List[Any] = "efficientnet"
def __init__( self : Optional[Any] , lowercase_ : int = 3 , lowercase_ : int = 6_00 , lowercase_ : float = 2.0 , lowercase_ : float = 3.1 , lowercase_ : int = 8 , lowercase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowercase_ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowercase_ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowercase_ : List[int] = [] , lowercase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowercase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowercase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowercase_ : float = 0.25 , lowercase_ : str = "swish" , lowercase_ : int = 25_60 , lowercase_ : str = "mean" , lowercase_ : float = 0.02 , lowercase_ : float = 0.0_01 , lowercase_ : float = 0.99 , lowercase_ : float = 0.5 , lowercase_ : float = 0.2 , **lowercase_ : Optional[int] , ) -> List[Any]:
super().__init__(**lowercase_ )
lowercase__ : Tuple = num_channels
lowercase__ : Optional[Any] = image_size
lowercase__ : List[Any] = width_coefficient
lowercase__ : List[str] = depth_coefficient
lowercase__ : Optional[Any] = depth_divisor
lowercase__ : Dict = kernel_sizes
lowercase__ : List[str] = in_channels
lowercase__ : Dict = out_channels
lowercase__ : str = depthwise_padding
lowercase__ : int = strides
lowercase__ : int = num_block_repeats
lowercase__ : int = expand_ratios
lowercase__ : Any = squeeze_expansion_ratio
lowercase__ : List[Any] = hidden_act
lowercase__ : int = hidden_dim
lowercase__ : Optional[Any] = pooling_type
lowercase__ : str = initializer_range
lowercase__ : Optional[Any] = batch_norm_eps
lowercase__ : Dict = batch_norm_momentum
lowercase__ : List[str] = dropout_rate
lowercase__ : Optional[int] = drop_connect_rate
lowercase__ : Union[str, Any] = sum(lowercase_ ) * 4
class snake_case_ ( __A ):
__A : Union[str, Any] = version.parse("1.11" )
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __UpperCamelCase ( self : List[Any] ) -> float:
return 1E-5
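# A sketch of the channel rounding that `width_coefficient` and
# `depth_divisor` imply (the standard EfficientNet rounding rule,
# restated here for illustration; it is not defined in this config class):
def round_filters(channels: int, width_coefficient: float = 2.0, depth_divisor: int = 8) -> int:
    scaled = channels * width_coefficient
    new_ch = max(depth_divisor, int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_ch < 0.9 * scaled:  # never drop below 90% of the scaled width
        new_ch += depth_divisor
    return int(new_ch)

assert round_filters(32) == 64  # the b7 width coefficient doubles the stem width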
| 333 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case_ ( __A ):
__A : List[str] = "unispeech"
def __init__( self : List[Any] , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=1E-5 , lowercase_ : int="group" , lowercase_ : Tuple="gelu" , lowercase_ : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : int=False , lowercase_ : List[Any]=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Any=2 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=3_20 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=1_00 , lowercase_ : Dict=2_56 , lowercase_ : Optional[Any]=2_56 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=2_56 , lowercase_ : Union[str, Any]=80 , lowercase_ : int=0 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.5 , **lowercase_ : Union[str, Any] , ) -> Any:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : Any = feat_extract_norm
lowercase__ : Optional[Any] = feat_extract_activation
lowercase__ : Dict = list(lowercase_ )
lowercase__ : Union[str, Any] = list(lowercase_ )
lowercase__ : List[str] = list(lowercase_ )
lowercase__ : List[str] = conv_bias
lowercase__ : Any = num_conv_pos_embeddings
lowercase__ : Dict = num_conv_pos_embedding_groups
lowercase__ : int = len(self.conv_dim )
lowercase__ : str = num_hidden_layers
lowercase__ : Any = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : int = num_attention_heads
lowercase__ : Union[str, Any] = hidden_dropout
lowercase__ : Any = attention_dropout
lowercase__ : Union[str, Any] = activation_dropout
lowercase__ : Any = feat_proj_dropout
lowercase__ : str = final_dropout
lowercase__ : int = layerdrop
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : Any = num_ctc_classes
lowercase__ : int = vocab_size
lowercase__ : str = do_stable_layer_norm
lowercase__ : Any = use_weighted_layer_sum
lowercase__ : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : List[Any] = apply_spec_augment
lowercase__ : Dict = mask_time_prob
lowercase__ : Tuple = mask_time_length
lowercase__ : str = mask_time_min_masks
lowercase__ : List[Any] = mask_feature_prob
lowercase__ : int = mask_feature_length
lowercase__ : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ : Optional[int] = num_codevectors_per_group
lowercase__ : List[str] = num_codevector_groups
lowercase__ : Dict = contrastive_logits_temperature
lowercase__ : Tuple = feat_quantizer_dropout
lowercase__ : Any = num_negatives
lowercase__ : Dict = codevector_dim
lowercase__ : Tuple = proj_codevector_dim
lowercase__ : List[str] = diversity_loss_weight
# ctc loss
lowercase__ : Tuple = ctc_loss_reduction
lowercase__ : Dict = ctc_zero_infinity
# pretraining loss
lowercase__ : Optional[Any] = replace_prob
@property
def __UpperCamelCase ( self : Dict ) -> Tuple:
return functools.reduce(operator.mul , self.conv_stride , 1 )
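# The property above gives the feature extractor's total downsampling
# factor as the product of the conv strides; for the default strides
# (5, 2, 2, 2, 2, 2, 2) that is 5 * 2**6 = 320 input samples per output frame.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320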
| 333 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( __A ,__A ,unittest.TestCase ):
__A : Optional[Any] = StableDiffusionSAGPipeline
__A : Optional[Any] = TEXT_TO_IMAGE_PARAMS
__A : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : List[Any] = False
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
torch.manual_seed(0 )
lowercase__ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowercase__ : Tuple = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
lowercase__ : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowercase__ : Union[str, Any] = CLIPTextModel(lowercase_ )
lowercase__ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase__ : Tuple = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __UpperCamelCase ( self : Tuple , lowercase_ : Any , lowercase_ : int=0 ) -> str:
if str(lowercase_ ).startswith("mps" ):
lowercase__ : Any = torch.manual_seed(lowercase_ )
else:
lowercase__ : Dict = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase__ : Optional[int] = {
"prompt": ".",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 1.0,
"sag_scale": 1.0,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
lowercase__ : Union[str, Any] = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
lowercase__ : Dict = sag_pipe.to(lowercase_ )
sag_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase__ : Optional[Any] = "."
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : int = sag_pipe(
[prompt] , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
lowercase__ : Optional[int] = output.images
lowercase__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase__ : Any = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : Tuple = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowercase__ : List[Any] = sag_pipe.to(lowercase_ )
sag_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase__ : Union[str, Any] = "."
lowercase__ : Tuple = torch.manual_seed(0 )
lowercase__ : Any = sag_pipe(
[prompt] , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
lowercase__ : int = output.images
lowercase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase__ : Dict = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def __UpperCamelCase ( self : int ) -> List[Any]:
lowercase__ : List[str] = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowercase__ : Tuple = sag_pipe.to(lowercase_ )
sag_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase__ : Dict = "."
lowercase__ : Optional[Any] = torch.manual_seed(0 )
lowercase__ : Any = sag_pipe(
[prompt] , width=7_68 , height=5_12 , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
lowercase__ : Dict = output.images
assert image.shape == (1, 5_12, 7_68, 3)
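# A minimal standalone sketch (not part of the test suite) of the regression
# pattern used above: compare a small corner slice of the generated image
# against a recorded reference with a loose tolerance. The arrays here are
# illustrative placeholders, not real pipeline outputs.
def _slice_check_demo():
    image = np.zeros((1, 512, 512, 3), dtype=np.float32)
    image_slice = image[0, -3:, -3:, -1]
    expected_slice = np.zeros(9, dtype=np.float32)
    assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2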
| 333 | def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list by alternating forward and backward bubble passes.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree
    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )
    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
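    # Illustrative usage (not in the original file): sum a three-node tree.
    tree = Node(10)
    tree.left = Node(5)
    tree.right = Node(-3)
    assert next(iter(BinaryTreeNodeSum(tree))) == 12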
| 333 | import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class snake_case_ ( __A ):
__A : int = "token-classification"
def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]:
if type(lowercase_ ) == dict:
lowercase__ : Dict = Namespace(**lowercase_ )
lowercase__ : str = import_module("tasks" )
try:
lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
lowercase__ : int = CrossEntropyLoss().ignore_index
super().__init__(lowercase_ , len(self.labels ) , self.mode )
def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any:
return self.model(**lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : Tuple = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ : Optional[int] = self(**lowercase_ )
lowercase__ : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
lowercase__ : Tuple = self.hparams
for mode in ["train", "dev", "test"]:
lowercase__ : Any = self._feature_file(lowercase_ )
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ )
lowercase__ : Dict = self.token_classification_task.convert_examples_to_features(
lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , lowercase_ )
torch.save(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader:
lowercase__ : str = self._feature_file(lowercase_ )
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
lowercase__ : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK (we will stop relying on this soon)
lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str:
"""Compute validation""" ""
lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : int = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ : List[Any] = self(**lowercase_ )
lowercase__ , lowercase__ : Any = outputs[:2]
lowercase__ : Optional[Any] = logits.detach().cpu().numpy()
lowercase__ : int = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]:
lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean()
lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 )
lowercase__ : Dict = np.argmax(lowercase_ , axis=2 )
lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowercase__ : Any = dict(enumerate(self.labels ) )
lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase__ : Any = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
"precision": precision_score(lowercase_ , lowercase_ ),
"recall": recall_score(lowercase_ , lowercase_ ),
"f1": fa_score(lowercase_ , lowercase_ ),
}
lowercase__ : List[Any] = dict(results.items() )
lowercase__ : List[str] = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict:
# when stable
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
lowercase__ : Any = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int:
# updating to test_epoch_end instead of deprecated test_end
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase__ : Optional[int] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple:
# Add NER specific options
BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
parser.add_argument(
"--task_type" , default="NER" , type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=lowercase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase = parser.parse_args()
UpperCamelCase = NERTransformer(args)
UpperCamelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCamelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
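# A standalone sketch of the label-alignment step performed in _eval_end above:
# positions whose gold label id equals the pad label id are dropped before
# scoring. Names and values here are illustrative assumptions, not this
# module's API.
def _unpad_labels_demo():
    pad_token_label_id = -100
    label_map = {0: "O", 1: "B-PER"}
    out_label_ids = [[0, 1, pad_token_label_id], [1, pad_token_label_id, pad_token_label_id]]
    preds = [[0, 0, 1], [1, 0, 0]]
    out_label_list = [
        [label_map[gold] for gold in row if gold != pad_token_label_id] for row in out_label_ids
    ]
    preds_list = [
        [label_map[p] for gold, p in zip(row, pred_row) if gold != pad_token_label_id]
        for row, pred_row in zip(out_label_ids, preds)
    ]
    assert out_label_list == [["O", "B-PER"], ["B-PER"]]
    assert preds_list == [["O", "O"], ["B-PER"]]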
| 333 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig( PretrainedConfig ):
    model_type = "vit_mae"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
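# A minimal sketch (assumed default values, not part of the original file) of
# the geometry implied by this config: 224x224 inputs with 16x16 patches give
# 196 patches, and a 0.75 mask ratio leaves the encoder only 49 visible ones.
_num_patches = (224 // 16) ** 2
assert _num_patches == 196
assert _num_patches - int(_num_patches * 0.75) == 49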
| 333 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_mask2former'''] = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mask2former'''] = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
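# A simplified, self-contained sketch of the lazy-import idea used above:
# PEP 562 lets a module define __getattr__, so a heavy submodule is only
# imported on first attribute access. The mapping below is illustrative only;
# the real _LazyModule also handles submodules and caches resolved attributes.
import importlib

_LAZY_ATTRS = {"sqrt": "math"}  # attribute name -> module that provides it

def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")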
| 333 | 1 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f'''Open Knight Tour cannot be performed on a board of size {n}'''
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
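    # Illustrative usage (not in the original file): the trivial 1x1 board has
    # the one-move tour [[1]]; larger boards work too but take longer.
    assert open_knight_tour(1) == [[1]]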
| 333 | # Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'''loss: {j} \t''')  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print('''theta: ''', theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta))  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors='''black''')
    plt.legend()
    plt.show()
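    # Illustrative follow-up (not in the original file): training-set accuracy
    # obtained by thresholding the predicted probabilities at 0.5.
    predictions = (predict_prob(x) >= 0.5).astype(int)
    print("training accuracy:", (predictions == y).mean())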
| 333 | 1 |
import argparse
import struct
import unittest
class SHAaaa :
    """Class that contains the entire pipeline for the SHA-256 hashing algorithm."""
    def __init__( self , data : bytes ) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
    @staticmethod
    def preprocessing( data : bytes ) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(">Q" , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L" , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    sa = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0x1_00_00_00_00
                # Compression
                sa = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                sb = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sb + maj) % 0x1_00_00_00_00
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((tempa + tempb) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = "".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value : int , rotations : int ) -> int:
        """Right-rotate a 32-bit unsigned value by the given number of rotations."""
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaHashTest( unittest.TestCase ):
    def test_match_hashes( self ) -> None:
        import hashlib
        msg = bytes("Test String" , "utf-8" )
        self.assertEqual(SHAaaa(msg ).hash , hashlib.sha256(msg ).hexdigest() )
def main() -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
    parser.add_argument(
        "-f" , "--file" , dest="input_file" , help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , "utf-8")
    print(SHAaaa(hash_input).hash)
if __name__ == "__main__":
main()
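    # Illustrative check (not in the original file): the pure-Python digest
    # matches hashlib for an arbitrary input.
    import hashlib
    assert SHAaaa(b"abc").hash == hashlib.sha256(b"abc").hexdigest()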
| 333 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__A )
class snake_case_ ( __A ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__A : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__A : ClassVar[Features] = Features({"text": Value("string" )} )
__A : ClassVar[Features] = Features({"labels": ClassLabel} )
__A : str = "text"
__A : str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| 333 | 1 |
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head for transformer encoders."""
    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)
    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
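# Illustrative usage (not in the original file): project a batch of two
# 768-dimensional hidden states onto 5 class logits.
if __name__ == "__main__":
    import torch
    head = ClassificationHead(class_size=5, embed_size=768)
    assert head(torch.randn(2, 768)).shape == (2, 5)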
| 333 | def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_val must be < max_val)")
    return min_val if option else max_val
def get_avg(number_a: int, number_b: int) -> int:
    return int((number_a + number_b) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower must be less than higher")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")
    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f'''guess the number : {last_numbers[-1]}''')
    print(f'''details : {last_numbers!s}''')
def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
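    # Illustrative note (not in the original file): bisection needs at most
    # ceil(log2(range size)) guesses; for the range 1..1000 that is 10.
    import math
    assert math.ceil(math.log2(1000)) == 10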
| 333 | 1 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any]):
assert isinstance(_lowerCamelCase , _lowerCamelCase)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict):
lowercase__ : Optional[int] = tmp_path / "cache"
lowercase__ : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Optional[int] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]):
lowercase__ : List[str] = tmp_path / "cache"
lowercase__ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : List[Any] = features.copy() if features else default_expected_features
lowercase__ : List[Any] = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : List[Any] = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : List[str] , _lowerCamelCase : int):
lowercase__ : Optional[int] = tmp_path / "cache"
lowercase__ : Optional[Any] = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowercase__ : Union[str, Any] = features.copy() if features else default_expected_features
lowercase__ : List[str] = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : List[Any] = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any]):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
lowercase__ : int = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
lowercase__ : Optional[Any] = features.copy()
lowercase__ : Optional[int] = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : Tuple = tmp_path / "cache"
lowercase__ : Dict = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : int):
lowercase__ : Tuple = tmp_path / "cache"
lowercase__ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Optional[Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list])
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str):
if issubclass(_lowerCamelCase , _lowerCamelCase):
lowercase__ : List[str] = jsonl_path
elif issubclass(_lowerCamelCase , _lowerCamelCase):
lowercase__ : Dict = [jsonl_path]
lowercase__ : Tuple = tmp_path / "cache"
lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple=("train",)):
assert isinstance(_lowerCamelCase , _lowerCamelCase)
for split in splits:
lowercase__ : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[str]):
lowercase__ : Dict = tmp_path / "cache"
lowercase__ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase__ : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]):
lowercase__ : Dict = tmp_path / "cache"
lowercase__ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : List[Any] = features.copy() if features else default_expected_features
lowercase__ : Tuple = (
Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
lowercase__ : List[str] = JsonDatasetReader({"train": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Any):
if split:
lowercase__ : Union[str, Any] = {split: jsonl_path}
else:
lowercase__ : Union[str, Any] = "train"
lowercase__ : List[str] = {"train": jsonl_path, "test": jsonl_path}
lowercase__ : Any = tmp_path / "cache"
lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def lowercase_ ( _lowerCamelCase : Optional[int]):
return json.load(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
return [json.loads(_lowerCamelCase) for line in buffer]
class snake_case_ :
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __UpperCamelCase ( self : Any , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : List[Any] ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write()
buffer.seek(0 )
lowercase__ : Union[str, Any] = load_json_function(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
assert isinstance(exported_content[0] , lowercase_ )
assert len(lowercase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Tuple ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write()
buffer.seek(0 )
lowercase__ : str = load_json(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase_ ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def __UpperCamelCase ( self : str , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Any ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , num_proc=2 ).write()
buffer.seek(0 )
lowercase__ : Tuple = load_json_function(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
assert isinstance(exported_content[0] , lowercase_ )
assert len(lowercase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def __UpperCamelCase ( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : str , lowercase_ : List[str] , lowercase_ : Optional[int] ) -> Optional[int]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write()
buffer.seek(0 )
lowercase__ : Any = load_json(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase_ ) == 10
def __UpperCamelCase ( self : Dict , lowercase_ : Optional[int] ) -> List[Any]:
with pytest.raises(lowercase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def __UpperCamelCase ( self : List[str] , lowercase_ : int , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Optional[int] ) -> int:
lowercase__ : Tuple = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
lowercase__ : List[Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(lowercase_ , lowercase_ , compression=lowercase_ ).write()
with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
lowercase__ : Optional[Any] = f.read()
with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
lowercase__ : Any = f.read()
assert exported_content == original_content
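# A minimal round-trip sketch of what the tests above exercise, written
# against the public `datasets` API. Treat this as an assumed usage pattern,
# not part of the test module itself.
import os
import tempfile
def _json_roundtrip_demo():
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    path = os.path.join(tempfile.mkdtemp(), "roundtrip.jsonl")
    ds.to_json(path, lines=True)  # one JSON object per line
    assert Dataset.from_json(path).column_names == ["col_1", "col_2"]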
| 333 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCamelCase = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ) -> List[str]:
lowercase__ : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowercase__ : List[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowercase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ )
lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowercase_ , "w" , newline="\n" ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , "r" ) as f:
self.assertTrue(f.read() , lowercase_ )
def __UpperCamelCase ( self : str ) -> Optional[int]:
lowercase__ : Optional[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> str:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , )
# Copy consistency with a really long name
lowercase__ : Optional[int] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
| 333 | 1 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all character ngrams of length `ngram_size` from a sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
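    # Illustrative usage (not in the original file): character trigrams.
    assert create_ngram("abcde", 3) == ["abc", "bcd", "cde"]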
| 333 | from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True):
model.train()
lowercase__ : Tuple = model(_lowerCamelCase)
lowercase__ : Union[str, Any] = F.mse_loss(_lowerCamelCase , target.to(output.device))
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=False):
set_seed(42)
lowercase__ : Dict = RegressionModel()
lowercase__ : int = deepcopy(_lowerCamelCase)
lowercase__ : str = RegressionDataset(length=80)
lowercase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=16)
model.to(accelerator.device)
if sched:
lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=1E-3)
lowercase__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3)
lowercase__ : Optional[int] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
lowercase__ : Union[str, Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
# Make a copy of `model`
if sched:
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
lowercase__ , lowercase__ : int = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowercase_ ( _lowerCamelCase : Tuple):
# Test when on a single CPU or GPU that the context manager does nothing
lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Any):
# Test on distributed setup that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Union[str, Any]=False):
lowercase__ : int = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : str = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))]
GradientState._reset_state()
def lowercase_ ( _lowerCamelCase : List[str]=False , _lowerCamelCase : int=False):
lowercase__ : Dict = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : Any = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        lowercase__ : Any = ddp_input[torch.randperm(len(_lowerCamelCase))]
    GradientState._reset_state()
def lowercase_ ( ):
lowercase__ : List[str] = Accelerator()
lowercase__ : List[Any] = RegressionDataset(length=80)
lowercase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ : int = RegressionDataset(length=96)
lowercase__ : List[str] = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ , lowercase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if iteration < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if batch_num < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowercase_ ( ):
lowercase__ : str = Accelerator()
lowercase__ : Dict = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**")
test_noop_sync(_lowerCamelCase)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**")
test_distributed_sync(_lowerCamelCase)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
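# NOTE (assumption): like accelerate's other test scripts, this file is meant to
# be executed through a launcher such as `accelerate launch`, so that the
# distributed state inspected in main() is properly initialized.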
| 333 | 1 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class snake_case_ :
def __UpperCamelCase ( self : List[str] ) -> str:
torch.manual_seed(0 )
lowercase__ : List[Any] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.00_01 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ : str = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Dict ) -> List[Any]:
torch.manual_seed(0 )
lowercase__ : Any = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
lowercase__ : Optional[int] = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.4_14 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowercase__ : Tuple = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.00_01 , beta_end=0.02 , thresholding=lowercase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
lowercase__ : List[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
lowercase__ : List[str] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __UpperCamelCase ( self : Any ) -> Optional[int]:
lowercase__ : str = self.get_dummy_components()
lowercase__ : Optional[int] = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase__ : Dict = self.get_dummy_inputs(lowercase_ )
lowercase__ : Dict = inputs["prompt"]
lowercase__ : List[str] = inputs["generator"]
lowercase__ : Tuple = inputs["num_inference_steps"]
lowercase__ : Union[str, Any] = inputs["output_type"]
if "image" in inputs:
lowercase__ : Optional[int] = inputs["image"]
else:
lowercase__ : List[Any] = None
if "mask_image" in inputs:
lowercase__ : int = inputs["mask_image"]
else:
lowercase__ : Union[str, Any] = None
if "original_image" in inputs:
lowercase__ : Tuple = inputs["original_image"]
else:
lowercase__ : Tuple = None
lowercase__ , lowercase__ : Optional[int] = pipe.encode_prompt(lowercase_ )
# inputs with prompt converted to embeddings
lowercase__ : Optional[int] = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ : List[str] = image
if mask_image is not None:
lowercase__ : List[Any] = mask_image
if original_image is not None:
lowercase__ : Tuple = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowercase_ , lowercase_ , lowercase_ )
lowercase__ : str = pipe(**lowercase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowercase_ )
lowercase__ : Dict = self.pipeline_class.from_pretrained(lowercase_ )
pipe_loaded.to(lowercase_ )
pipe_loaded.set_progress_bar_config(disable=lowercase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowercase_ , lowercase_ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(lowercase_ )
lowercase__ : str = inputs["generator"]
lowercase__ : Optional[Any] = inputs["num_inference_steps"]
lowercase__ : Any = inputs["output_type"]
# inputs with prompt converted to embeddings
lowercase__ : Optional[Any] = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
lowercase__ : int = image
if mask_image is not None:
lowercase__ : Optional[int] = mask_image
if original_image is not None:
lowercase__ : Tuple = original_image
lowercase__ : Tuple = pipe_loaded(**lowercase_ )[0]
lowercase__ : Any = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
self.assertLess(lowercase_ , 1E-4 )
def __UpperCamelCase ( self : Optional[Any] ) -> int:
lowercase__ : List[Any] = self.get_dummy_components()
lowercase__ : Dict = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase__ : Optional[Any] = self.get_dummy_inputs(lowercase_ )
lowercase__ : Optional[Any] = pipe(**lowercase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowercase_ )
lowercase__ : Optional[int] = self.pipeline_class.from_pretrained(lowercase_ )
pipe_loaded.to(lowercase_ )
pipe_loaded.set_progress_bar_config(disable=lowercase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowercase__ : Dict = self.get_dummy_inputs(lowercase_ )
lowercase__ : Union[str, Any] = pipe_loaded(**lowercase_ )[0]
lowercase__ : List[str] = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
self.assertLess(lowercase_ , 1E-4 )
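    # Both round-trip checks above reload the pipeline from disk and require its
    # outputs to match the in-memory pipeline within an absolute tolerance of 1e-4.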
| 333 | import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : str):
lowercase__ : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase)
lowercase__ : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase)
lowercase__ : List[str] = checkpoints.load_tax_checkpoint(_lowerCamelCase)
lowercase__ : Dict = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
lowercase__ : str = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
        if split_mlp_wi:
            lowercase__ : Tuple = tax_mlp_wi_0
            lowercase__ : str = tax_mlp_wi_1
        else:
            lowercase__ : List[Any] = tax_mlp_wi
        lowercase__ : str = tax_mlp_wo
        lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
lowercase__ : Dict = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
        if split_mlp_wi:
            lowercase__ : List[str] = tax_mlp_wi_0
            lowercase__ : List[Any] = tax_mlp_wi_1
        else:
            lowercase__ : Tuple = tax_mlp_wi
        lowercase__ : Any = tax_mlp_wo
        lowercase__ : Tuple = tax_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    lowercase__ : List[Any] = tax_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
    lowercase__ : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase)
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
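    # Example invocation (hypothetical script name and paths):
    #   python convert_t5x_to_flax.py --t5x_checkpoint_path /path/to/t5x_ckpt \
    #       --config_name google/long-t5-local-base --flax_dump_folder_path ./flax_model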
| 333 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case_ ( __A ):
__A : List[str] = "unispeech"
def __init__( self : List[Any] , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=1E-5 , lowercase_ : int="group" , lowercase_ : Tuple="gelu" , lowercase_ : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : int=False , lowercase_ : List[Any]=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Any=2 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=3_20 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=1_00 , lowercase_ : Dict=2_56 , lowercase_ : Optional[Any]=2_56 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=2_56 , lowercase_ : Union[str, Any]=80 , lowercase_ : int=0 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.5 , **lowercase_ : Union[str, Any] , ) -> Any:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : Any = feat_extract_norm
lowercase__ : Optional[Any] = feat_extract_activation
lowercase__ : Dict = list(lowercase_ )
lowercase__ : Union[str, Any] = list(lowercase_ )
lowercase__ : List[str] = list(lowercase_ )
lowercase__ : List[str] = conv_bias
lowercase__ : Any = num_conv_pos_embeddings
lowercase__ : Dict = num_conv_pos_embedding_groups
lowercase__ : int = len(self.conv_dim )
lowercase__ : str = num_hidden_layers
lowercase__ : Any = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : int = num_attention_heads
lowercase__ : Union[str, Any] = hidden_dropout
lowercase__ : Any = attention_dropout
lowercase__ : Union[str, Any] = activation_dropout
lowercase__ : Any = feat_proj_dropout
lowercase__ : str = final_dropout
lowercase__ : int = layerdrop
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : Any = num_ctc_classes
lowercase__ : int = vocab_size
lowercase__ : str = do_stable_layer_norm
lowercase__ : Any = use_weighted_layer_sum
lowercase__ : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : List[Any] = apply_spec_augment
lowercase__ : Dict = mask_time_prob
lowercase__ : Tuple = mask_time_length
lowercase__ : str = mask_time_min_masks
lowercase__ : List[Any] = mask_feature_prob
lowercase__ : int = mask_feature_length
lowercase__ : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ : Optional[int] = num_codevectors_per_group
lowercase__ : List[str] = num_codevector_groups
lowercase__ : Dict = contrastive_logits_temperature
lowercase__ : Tuple = feat_quantizer_dropout
lowercase__ : Any = num_negatives
lowercase__ : Dict = codevector_dim
lowercase__ : Tuple = proj_codevector_dim
lowercase__ : List[str] = diversity_loss_weight
# ctc loss
lowercase__ : Tuple = ctc_loss_reduction
lowercase__ : Dict = ctc_zero_infinity
# pretraining loss
lowercase__ : Optional[Any] = replace_prob
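    # Total stride of the convolutional feature encoder: the product of all conv
    # strides, i.e. how many raw audio samples collapse into one output frame.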
@property
def __UpperCamelCase ( self : Dict ) -> Tuple:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 333 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
__A : Optional[int] = "rwkv"
__A : List[str] = {"max_position_embeddings": "context_length"}
def __init__( self : Dict , lowercase_ : List[Any]=5_02_77 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=40_96 , lowercase_ : int=32 , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Any=1E-5 , lowercase_ : Optional[Any]=0 , lowercase_ : Any=0 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=False , lowercase_ : int=True , **lowercase_ : List[str] , ) -> int:
lowercase__ : List[str] = vocab_size
lowercase__ : str = context_length
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase__ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowercase__ : List[Any] = layer_norm_epsilon
lowercase__ : str = rescale_every
lowercase__ : Optional[int] = use_cache
lowercase__ : int = bos_token_id
lowercase__ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
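# Minimal usage sketch (assumption): the config can be instantiated directly,
# e.g. `config = snake_case_(vocab_size=5_02_77, context_length=10_24)`; as the
# constructor shows, attention_hidden_size and intermediate_size are derived
# from hidden_size whenever they are left as None.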
| 333 | 1 |
def lowercase_ ( discount_rate : float , cash_flows : list[float]):
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value , ndigits=2)
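# Worked example: with a 10% discount rate and cash flows [-250, 100, 100, 100],
# the present value is -250 + 100/1.1 + 100/1.21 + 100/1.331 ≈ -1.31, which is
# what lowercase_(0.10, [-250.0, 100.0, 100.0, 100.0]) returns.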
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | class snake_case_ :
def __init__( self : int ) -> Optional[int]:
lowercase__ : Optional[int] = 0
lowercase__ : List[str] = 0
lowercase__ : Any = {}
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]:
if vertex not in self.adjacency:
lowercase__ : List[Any] = {}
self.num_vertices += 1
def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]:
self.add_vertex(lowercase_ )
self.add_vertex(lowercase_ )
if head == tail:
return
lowercase__ : int = weight
lowercase__ : Any = weight
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : List[Any] = self.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : int = edge
edges.remove((tail, head, weight) )
for i in range(len(lowercase_ ) ):
lowercase__ : Tuple = list(edges[i] )
        edges.sort(key=lambda lowercase_ : lowercase_[2] )
for i in range(len(lowercase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowercase__ : int = edges[i][2] + 1
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge
lowercase__ : Union[str, Any] = weight
lowercase__ : Dict = weight
def __str__( self : str ) -> Any:
lowercase__ : str = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase__ : Optional[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Any = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : List[str] ) -> Dict:
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]:
lowercase__ : Any = Graph()
if vertices is None:
lowercase__ : str = []
if edges is None:
lowercase__ : List[Any] = []
for vertex in vertices:
g.add_vertex(lowercase_ )
for edge in edges:
g.add_edge(*lowercase_ )
return g
class snake_case_ :
def __init__( self : int ) -> List[str]:
lowercase__ : Dict = {}
lowercase__ : Tuple = {}
def __len__( self : Union[str, Any] ) -> Union[str, Any]:
return len(self.parent )
def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple:
if item in self.parent:
return self.find(lowercase_ )
lowercase__ : Union[str, Any] = item
lowercase__ : int = 0
return item
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any:
if item not in self.parent:
return self.make_set(lowercase_ )
if item != self.parent[item]:
lowercase__ : Union[str, Any] = self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]:
lowercase__ : Dict = self.find(lowercase_ )
lowercase__ : Optional[int] = self.find(lowercase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowercase__ : Dict = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowercase__ : int = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowercase__ : Tuple = roota
return roota
return None
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]:
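        # Borůvka's algorithm: while more than one component remains, find the
        # cheapest edge leaving each component, add those edges to the MST, and
        # merge the endpoint components through the union-find structure.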
lowercase__ : List[Any] = graph.num_vertices
lowercase__ : Optional[Any] = Graph.UnionFind()
lowercase__ : int = []
while num_components > 1:
lowercase__ : List[Any] = {}
for vertex in graph.get_vertices():
lowercase__ : Any = -1
lowercase__ : List[str] = graph.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : List[str] = edge
lowercase__ : List[str] = union_find.find(lowercase_ )
lowercase__ : Union[str, Any] = union_find.find(lowercase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : int = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : Dict = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex]
if union_find.find(lowercase_ ) != union_find.find(lowercase_ ):
union_find.union(lowercase_ , lowercase_ )
mst_edges.append(cheap_edge[vertex] )
lowercase__ : Optional[Any] = num_components - 1
lowercase__ : List[Any] = Graph.build(edges=lowercase_ )
return mst
| 333 | 1 |
def lowercase_ ( arr : list[int]):
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to exactly j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True
    for i in range(1 , s + 1):
        dp[0][i] = False
    for i in range(1 , n + 1):
        for j in range(1 , s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = s
    # the largest achievable subset sum j <= s / 2 minimizes the difference s - 2 * j
    for j in range(int(s / 2) , -1 , -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
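# Worked example: for [1, 6, 11, 5] the best split is {1, 5, 6} vs {11}
# (sums 12 and 11), so the minimum subset-sum difference returned is 1.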
| 333 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = DPTConfig()
if "large" in checkpoint_url:
lowercase__ : str = 1024
lowercase__ : List[str] = 4096
lowercase__ : List[Any] = 24
lowercase__ : Dict = 16
lowercase__ : Union[str, Any] = [5, 11, 17, 23]
lowercase__ : Any = [256, 512, 1024, 1024]
lowercase__ : Optional[int] = (1, 384, 384)
if "ade" in checkpoint_url:
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = 150
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : str = "ade20k-id2label.json"
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r"))
        lowercase__ : Union[str, Any] = {int(k): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = [1, 150, 480, 480]
return config, expected_shape
def lowercase_ ( _lowerCamelCase : List[Any]):
lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
        state_dict.pop(k , None)
def lowercase_ ( _lowerCamelCase : Tuple):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder")
if "pretrained.model" in name:
lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings")
if "patch_embed" in name:
lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings")
if "pos_embed" in name:
lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings")
if "attn.proj" in name:
lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense")
if "proj" in name and "project" not in name:
lowercase__ : int = name.replace("proj" , "projection")
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layer")
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense")
if "mlp.fc2" in name:
lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense")
if "norm1" in name:
lowercase__ : List[str] = name.replace("norm1" , "layernorm_before")
if "norm2" in name:
lowercase__ : Dict = name.replace("norm2" , "layernorm_after")
if "scratch.output_conv" in name:
lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head")
if "scratch" in name:
lowercase__ : str = name.replace("scratch" , "neck")
if "layer1_rn" in name:
lowercase__ : int = name.replace("layer1_rn" , "convs.0")
if "layer2_rn" in name:
lowercase__ : int = name.replace("layer2_rn" , "convs.1")
if "layer3_rn" in name:
lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2")
if "layer4_rn" in name:
lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3")
if "refinenet" in name:
lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''')
if "out_conv" in name:
lowercase__ : str = name.replace("out_conv" , "projection")
if "resConfUnit1" in name:
lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1")
if "resConfUnit2" in name:
lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2")
if "conv1" in name:
lowercase__ : List[Any] = name.replace("conv1" , "convolution1")
if "conv2" in name:
lowercase__ : Tuple = name.replace("conv2" , "convolution2")
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0")
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0")
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0")
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0")
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection")
if "pretrained.act_postprocess1.4" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize")
if "pretrained.act_postprocess2.3" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection")
if "pretrained.act_postprocess2.4" in name:
lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize")
if "pretrained.act_postprocess3.3" in name:
lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection")
if "pretrained.act_postprocess4.3" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection")
if "pretrained.act_postprocess4.4" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize")
if "pretrained" in name:
lowercase__ : Any = name.replace("pretrained" , "dpt")
if "bn" in name:
lowercase__ : str = name.replace("bn" , "batch_norm")
if "head" in name:
lowercase__ : Optional[Any] = name.replace("head" , "head.head")
if "encoder.norm" in name:
lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm")
if "auxlayer" in name:
lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head")
return name
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str):
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size]
lowercase__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : int = in_proj_bias[-config.hidden_size :]
def lowercase_ ( ):
lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict):
lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase)
# load original state_dict from URL
lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu")
# remove certain keys
remove_ignore_keys_(_lowerCamelCase)
# rename keys
for key in state_dict.copy().keys():
lowercase__ : List[str] = state_dict.pop(_lowerCamelCase)
lowercase__ : List[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase)
# load HuggingFace model
lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase)
model.load_state_dict(_lowerCamelCase)
model.eval()
# Check outputs on an image
lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384
lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase)
lowercase__ : List[str] = prepare_img()
lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt")
# forward pass
lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth
# Assert logits
lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
if "ade" in checkpoint_url:
lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
assert outputs.shape == torch.Size(_lowerCamelCase)
assert (
torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase)
)
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
print(f'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowerCamelCase)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowerCamelCase)
if push_to_hub:
print("Pushing model to hub...")
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
UpperCamelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
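    # Example invocation (hypothetical script name; the URL is the default above):
    #   python convert_dpt.py \
    #       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
    #       --pytorch_dump_folder_path ./dpt-large --model_name dpt-large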
| 333 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
UpperCamelCase = NewType('''DataClass''', Any)
UpperCamelCase = NewType('''DataClassType''', Any)
def lowercase_ ( v : Dict):
    if isinstance(v , bool):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''')
def lowercase_ ( _lowerCamelCase : list):
    str_to_choice = {str(choice): choice for choice in choices}
return lambda _lowerCamelCase: str_to_choice.get(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( *,
    aliases : Union[str, List[str]] = None , help : str = None , default : Any = dataclasses.MISSING , default_factory : Callable[[], Any] = dataclasses.MISSING , metadata : dict = None , **kwargs , ):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs)
class snake_case_ ( __A ):
__A : Iterable[DataClassType]
def __init__( self : Dict , lowercase_ : Union[DataClassType, Iterable[DataClassType]] , **lowercase_ : Tuple ) -> Dict:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
lowercase__ : Optional[int] = ArgumentDefaultsHelpFormatter
super().__init__(**lowercase_ )
if dataclasses.is_dataclass(lowercase_ ):
lowercase__ : int = [dataclass_types]
lowercase__ : List[Any] = list(lowercase_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(lowercase_ )
@staticmethod
def __UpperCamelCase ( lowercase_ : ArgumentParser , lowercase_ : dataclasses.Field ) -> str:
lowercase__ : List[Any] = F'''--{field.name}'''
lowercase__ : Tuple = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , lowercase_ ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
lowercase__ : Optional[Any] = kwargs.pop("aliases" , [] )
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : str = [aliases]
lowercase__ : Optional[int] = getattr(field.type , "__origin__" , field.type )
        if origin_type is Union or (hasattr(types , "UnionType" ) and isinstance(lowercase_ , types.UnionType )):
if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
F''' Problem encountered in field \'{field.name}\'.''' )
            if type(None ) not in field.type.__args__:
# filter `str` in Union
lowercase__ : int = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
lowercase__ : str = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
lowercase__ : Optional[int] = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
)
lowercase__ : List[str] = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
lowercase__ : Dict = {}
if origin_type is Literal or (isinstance(field.type , lowercase_ ) and issubclass(field.type , lowercase_ )):
if origin_type is Literal:
lowercase__ : List[Any] = field.type.__args__
else:
lowercase__ : Any = [x.value for x in field.type]
lowercase__ : Tuple = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
lowercase__ : int = field.default
else:
lowercase__ : Optional[Any] = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs so they can be used to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
lowercase__ : Union[str, Any] = copy(lowercase_ )
# Hack because type=bool in argparse does not behave as we want.
lowercase__ : Optional[int] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
lowercase__ : Optional[Any] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
lowercase__ : str = default
# This tells argparse we accept 0 or 1 value after --field_name
lowercase__ : str = "?"
# This is the value that will get picked if we do --field_name (without value)
lowercase__ : List[str] = True
elif isclass(lowercase_ ) and issubclass(lowercase_ , lowercase_ ):
lowercase__ : str = field.type.__args__[0]
lowercase__ : Tuple = "+"
if field.default_factory is not dataclasses.MISSING:
lowercase__ : Any = field.default_factory()
elif field.default is dataclasses.MISSING:
lowercase__ : int = True
else:
lowercase__ : List[str] = field.type
if field.default is not dataclasses.MISSING:
lowercase__ : Tuple = field.default
elif field.default_factory is not dataclasses.MISSING:
lowercase__ : Dict = field.default_factory()
else:
lowercase__ : List[str] = True
parser.add_argument(lowercase_ , *lowercase_ , **lowercase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
lowercase__ : Union[str, Any] = False
parser.add_argument(F'''--no_{field.name}''' , action="store_false" , dest=field.name , **lowercase_ )
def __UpperCamelCase ( self : Any , lowercase_ : DataClassType ) -> List[Any]:
if hasattr(lowercase_ , "_argument_group_name" ):
lowercase__ : List[str] = self.add_argument_group(dtype._argument_group_name )
else:
lowercase__ : Optional[Any] = self
try:
lowercase__ : Dict[str, type] = get_type_hints(lowercase_ )
except NameError:
raise RuntimeError(
F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowercase_ ):
lowercase__ : Optional[Any] = ".".join(map(lowercase_ , sys.version_info[:3] ) )
raise RuntimeError(
F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(lowercase_ ):
if not field.init:
continue
lowercase__ : Optional[int] = type_hints[field.name]
self._parse_dataclass_field(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Dict , lowercase_ : int=None , lowercase_ : Optional[Any]=False , lowercase_ : List[Any]=True , lowercase_ : List[str]=None , lowercase_ : Any=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
lowercase__ : List[Any] = []
if args_filename:
args_files.append(Path(lowercase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
lowercase__ : str = ArgumentParser()
args_file_parser.add_argument(lowercase_ , type=lowercase_ , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
lowercase__ , lowercase__ : Union[str, Any] = args_file_parser.parse_known_args(args=lowercase_ )
lowercase__ : Union[str, Any] = vars(lowercase_ ).get(args_file_flag.lstrip("-" ) , lowercase_ )
if cmd_args_file_paths:
args_files.extend([Path(lowercase_ ) for p in cmd_args_file_paths] )
lowercase__ : Any = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
lowercase__ : List[Any] = file_args + args if args is not None else file_args + sys.argv[1:]
lowercase__ , lowercase__ : List[str] = self.parse_known_args(args=lowercase_ )
lowercase__ : int = []
for dtype in self.dataclass_types:
lowercase__ : int = {f.name for f in dataclasses.fields(lowercase_ ) if f.init}
lowercase__ : Tuple = {k: v for k, v in vars(lowercase_ ).items() if k in keys}
for k in keys:
delattr(lowercase_ , lowercase_ )
lowercase__ : Tuple = dtype(**lowercase_ )
outputs.append(lowercase_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(lowercase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
def __UpperCamelCase ( self : Dict , lowercase_ : Dict[str, Any] , lowercase_ : bool = False ) -> Tuple[DataClass, ...]:
lowercase__ : Optional[int] = set(args.keys() )
lowercase__ : List[str] = []
for dtype in self.dataclass_types:
lowercase__ : int = {f.name for f in dataclasses.fields(lowercase_ ) if f.init}
lowercase__ : Optional[int] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
lowercase__ : Optional[int] = dtype(**lowercase_ )
outputs.append(lowercase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(lowercase_ )}''' )
return tuple(lowercase_ )
def __UpperCamelCase ( self : List[str] , lowercase_ : str , lowercase_ : bool = False ) -> Tuple[DataClass, ...]:
with open(Path(lowercase_ ) , encoding="utf-8" ) as open_json_file:
lowercase__ : Tuple = json.loads(open_json_file.read() )
lowercase__ : Dict = self.parse_dict(lowercase_ , allow_extra_keys=lowercase_ )
return tuple(lowercase_ )
def __UpperCamelCase ( self : Optional[int] , lowercase_ : str , lowercase_ : bool = False ) -> Tuple[DataClass, ...]:
lowercase__ : Tuple = self.parse_dict(yaml.safe_load(Path(lowercase_ ).read_text() ) , allow_extra_keys=lowercase_ )
return tuple(lowercase_ )
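# Usage sketch (assumption): upstream this class is transformers.HfArgumentParser,
# typically used as
#   @dataclasses.dataclass
#   class Args:
#       learning_rate: float = lowercase_(default=1e-4, help="learning rate")
#   (args,) = HfArgumentParser(Args).parse_args_into_dataclasses()
# Here the class and its methods have been renamed (snake_case_ / __UpperCamelCase),
# so the entry-point names above are illustrative only.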
def lowercase_ ( numerator : int = 1 , digit : int = 1000):
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1):
        has_been_divided : list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
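# Context: this mirrors Project Euler problem 26. For the default arguments it
# searches d = 1 .. 1000 for the unit fraction 1/d with the longest chain of
# remainders before one repeats; 1/7, for instance, cycles after 6 steps, the
# longest for d <= 10, and the answer for d < 1000 is 983.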
| 333 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
UpperCamelCase = None
UpperCamelCase = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
UpperCamelCase = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class snake_case_ :
__A : bool = True
__A : Optional[str] = None
# Automatically constructed
__A : ClassVar[str] = "PIL.Image.Image"
__A : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
__A : str = field(default="Image" ,init=__A ,repr=__A )
def __call__( self : str ) -> Any:
return self.pa_type
def __UpperCamelCase ( self : str , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : int = np.array(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
return {"path": value, "bytes": None}
elif isinstance(lowercase_ , lowercase_ ):
return {"path": None, "bytes": value}
elif isinstance(lowercase_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowercase_ )
elif isinstance(lowercase_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowercase_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : dict , lowercase_ : Optional[Any]=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
lowercase__ : Optional[Any] = {}
lowercase__ , lowercase__ : Any = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(lowercase_ ):
lowercase__ : Optional[int] = PIL.Image.open(lowercase_ )
else:
lowercase__ : Optional[int] = path.split("::" )[-1]
try:
lowercase__ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["repo_id"]
lowercase__ : Tuple = token_per_repo_id.get(lowercase_ )
except ValueError:
lowercase__ : Tuple = None
with xopen(lowercase_ , "rb" , use_auth_token=lowercase_ ) as f:
lowercase__ : Union[str, Any] = BytesIO(f.read() )
lowercase__ : Optional[int] = PIL.Image.open(bytes_ )
else:
lowercase__ : List[str] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __UpperCamelCase ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def __UpperCamelCase ( self : Any , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
lowercase__ : Optional[int] = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
lowercase__ : int = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowercase__ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase__ : Dict = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
lowercase__ : Tuple = storage.field("bytes" )
else:
lowercase__ : int = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
lowercase__ : Any = storage.field("path" )
else:
lowercase__ : Any = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase__ : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowercase__ : Optional[int] = pa.array(
[encode_np_array(np.array(lowercase_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowercase__ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase__ : int = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : pa.StructArray ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(lowercase_ : List[str] ):
with xopen(lowercase_ , "rb" ) as f:
lowercase__ : str = f.read()
return bytes_
lowercase__ : int = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowercase__ : Optional[int] = pa.array(
[os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
lowercase__ : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
def lowercase_ ( ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowercase__ : List[str] = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
return _IMAGE_COMPRESSION_FORMATS
def lowercase_ ( _lowerCamelCase : "PIL.Image.Image"):
lowercase__ : Any = BytesIO()
if image.format in list_image_compression_formats():
lowercase__ : List[str] = image.format
else:
lowercase__ : Dict = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_lowerCamelCase , format=_lowerCamelCase)
return buffer.getvalue()
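# Note (hedged): formats that Pillow can both open and save keep the image's
# original format on re-encoding; otherwise the mode decides, e.g. an "RGB"
# image is saved as PNG while a 32-bit float "F" image falls back to TIFF.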
def lowercase_ ( _lowerCamelCase : "PIL.Image.Image"):
if hasattr(_lowerCamelCase , "filename") and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_lowerCamelCase)}
def lowercase_ ( _lowerCamelCase : np.ndarray):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
lowercase__ : Dict = array.dtype
lowercase__ : str = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
lowercase__ : Dict = dtype.kind
lowercase__ : int = dtype.itemsize
lowercase__ : Union[str, Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowercase__ : Any = np.dtype("|u1")
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''')
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''')
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowercase__ : Any = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowercase__ : Any = dtype_byteorder + dtype_kind + str(_lowerCamelCase)
lowercase__ : Optional[int] = np.dtype(_lowerCamelCase)
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''')
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''')
lowercase__ : Optional[Any] = PIL.Image.fromarray(array.astype(_lowerCamelCase))
return {"path": None, "bytes": image_to_bytes(_lowerCamelCase)}
def lowercase_ ( _lowerCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
if objs:
lowercase__ , lowercase__ : Any = first_non_null_value(_lowerCamelCase)
if isinstance(_lowerCamelCase , _lowerCamelCase):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_lowerCamelCase , np.ndarray):
lowercase__ : Optional[Any] = no_op_if_value_is_null(_lowerCamelCase)
return [obj_to_image_dict_func(_lowerCamelCase) for obj in objs]
elif isinstance(_lowerCamelCase , PIL.Image.Image):
lowercase__ : List[Any] = no_op_if_value_is_null(_lowerCamelCase)
return [obj_to_image_dict_func(_lowerCamelCase) for obj in objs]
else:
return objs
else:
return objs
| 333 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ):
__A : int = StableUnCLIPPipeline
__A : int = TEXT_TO_IMAGE_PARAMS
__A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__A : int = False
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : str = 32
lowercase__ : Any = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : Any = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase_ , num_layers=1 , )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowercase__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ )
lowercase__ : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , )
torch.manual_seed(0 )
lowercase__ : Any = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL()
lowercase__ : List[Any] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __UpperCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Dict=0 ) -> Any:
if str(lowercase_ ).startswith("mps" ):
lowercase__ : Any = torch.manual_seed(lowercase_ )
else:
lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase__ : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
lowercase__ : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
lowercase__ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowercase_ )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowercase__ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
        lowercase__ : Dict = pipe("anime turtle" , generator=lowercase_ , output_type="np" )
lowercase__ : Optional[int] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowercase__ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : str = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowercase__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 1 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : List[str] , _lowerCamelCase : Dict):
# Initialise PyTorch model
lowercase__ : Dict = MobileBertConfig.from_json_file(_lowerCamelCase)
print(f'''Building PyTorch model from configuration: {config}''')
lowercase__ : Tuple = MobileBertForPreTraining(_lowerCamelCase)
# Load weights from tf checkpoint
lowercase__ : Any = load_tf_weights_in_mobilebert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''')
torch.save(model.state_dict() , _lowerCamelCase)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 333 | import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int=False):
try:
lowercase__ : str = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase__ : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
lowercase__ : Union[str, Any] = strtobool(_lowerCamelCase)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''')
return _value
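# Usage sketch (assumed, not from the original file): with RUN_SLOW=yes exported
# in the environment, parse_flag_from_env("RUN_SLOW", default=False) returns a
# truthy value; if the variable is unset, the supplied default is returned as-is.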
UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skip("Test was skipped")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict):
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=None):
if test_case is None:
return partial(_lowerCamelCase , version=_lowerCamelCase)
return unittest.skipUnless(is_torch_version(">=" , _lowerCamelCase) , f'''test requires torch version >= {version}''')(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase)
UpperCamelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase)
class snake_case_ ( unittest.TestCase ):
__A : int = True
@classmethod
def __UpperCamelCase ( cls : str ) -> str:
lowercase__ : str = tempfile.mkdtemp()
@classmethod
def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCamelCase ( self : str ) -> Optional[int]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowercase_ )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str:
lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Tuple = AcceleratorState()
lowercase__ : Optional[int] = tensor[None].clone().to(state.device)
lowercase__ : Optional[int] = gather(_lowerCamelCase).cpu()
lowercase__ : Optional[Any] = tensor[0].cpu()
for i in range(tensors.shape[0]):
if not torch.equal(tensors[i] , _lowerCamelCase):
return False
return True
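# Sketch (hedged): the helper above gathers one copy of the tensor from every
# process and compares each against the local copy, returning True only when
# all ranks hold identical data; a handy sanity check in distributed tests.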
class snake_case_ :
def __init__( self : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int ) -> Union[str, Any]:
lowercase__ : int = returncode
lowercase__ : Dict = stdout
lowercase__ : List[Any] = stderr
async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str):
while True:
lowercase__ : int = await stream.readline()
if line:
callback(_lowerCamelCase)
else:
break
async def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=False):
if echo:
print("\nRunning: " , " ".join(_lowerCamelCase))
lowercase__ : str = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase__ : Tuple = []
lowercase__ : List[Any] = []
def tee(_lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=""):
lowercase__ : Optional[int] = line.decode("utf-8").rstrip()
sink.append(_lowerCamelCase)
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:"))),
asyncio.create_task(_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:"))),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=180 , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=True):
lowercase__ : Optional[Any] = asyncio.get_event_loop()
lowercase__ : List[Any] = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase))
lowercase__ : str = " ".join(_lowerCamelCase)
if result.returncode > 0:
lowercase__ : Dict = "\n".join(result.stderr)
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''')
return result
class snake_case_ ( __A ):
pass
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any=False):
try:
lowercase__ : Optional[int] = subprocess.check_output(_lowerCamelCase , stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(_lowerCamelCase , "decode"):
lowercase__ : Optional[Any] = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{" ".join(_lowerCamelCase)}` failed with the following error:\n\n{e.output.decode()}''') from e
| 333 | 1 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase = logging.get_logger(__name__)
class snake_case_ :
def __init__( self : Tuple , lowercase_ : List[Any] , lowercase_ : Optional[int] ) -> Any:
lowercase__ : Tuple = question_encoder
lowercase__ : Optional[Any] = generator
lowercase__ : List[str] = self.question_encoder
def __UpperCamelCase ( self : Any , lowercase_ : Tuple ) -> Any:
if os.path.isfile(lowercase_ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowercase__ : Dict = os.path.join(lowercase_ , "question_encoder_tokenizer" )
lowercase__ : Tuple = os.path.join(lowercase_ , "generator_tokenizer" )
self.question_encoder.save_pretrained(lowercase_ )
self.generator.save_pretrained(lowercase_ )
@classmethod
def __UpperCamelCase ( cls : Tuple , lowercase_ : List[Any] , **lowercase_ : Tuple ) -> str:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
lowercase__ : List[str] = kwargs.pop("config" , lowercase_ )
if config is None:
lowercase__ : Any = RagConfig.from_pretrained(lowercase_ )
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(
lowercase_ , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
lowercase__ : str = AutoTokenizer.from_pretrained(
lowercase_ , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=lowercase_ , generator=lowercase_ )
def __call__( self : Tuple , *lowercase_ : Any , **lowercase_ : Dict ) -> List[Any]:
return self.current_tokenizer(*lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Dict ) -> Dict:
return self.generator.batch_decode(*lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : int , *lowercase_ : List[str] , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
return self.generator.decode(*lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
lowercase__ : Any = self.question_encoder
def __UpperCamelCase ( self : int ) -> str:
lowercase__ : int = self.generator
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[List[str]] = None , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : str = "longest" , lowercase_ : str = None , lowercase_ : bool = True , **lowercase_ : List[Any] , ) -> BatchEncoding:
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , lowercase_ , )
if max_length is None:
lowercase__ : Any = self.current_tokenizer.model_max_length
lowercase__ : int = self(
lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , max_length=lowercase_ , padding=lowercase_ , truncation=lowercase_ , **lowercase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
lowercase__ : Optional[int] = self.current_tokenizer.model_max_length
lowercase__ : List[Any] = self(
text_target=lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , **lowercase_ , )
lowercase__ : Dict = labels["input_ids"]
return model_inputs
| 333 | from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : int , **lowercase_ : List[str] ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Any:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Any ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Dict , *lowercase_ : str , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ) -> List[str]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Any ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Tuple:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Dict:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> int:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[Any] = ["flax"]
def __init__( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : int ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Any , *lowercase_ : int , **lowercase_ : int ) -> Optional[int]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : str ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[int] = ["flax"]
def __init__( self : Any , *lowercase_ : str , **lowercase_ : Dict ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : int ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[str] = ["flax"]
def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
| 333 | 1 |
import string
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : int = ""
for i in sequence:
lowercase__ : Any = ord(_lowerCamelCase)
if 65 <= extract <= 90:
output += chr(155 - extract)
elif 97 <= extract <= 122:
output += chr(219 - extract)
else:
output += i
return output
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Tuple = string.ascii_letters
lowercase__ : List[str] = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(_lowerCamelCase)] if c in letters else c for c in sequence)
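# Worked example (illustrative): atbash maps each letter to its mirror in the
# alphabet, so "abc" -> "zyx" and "Hello" -> "Svool", while digits, spaces and
# punctuation pass through unchanged; both implementations agree on all inputs.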
def lowercase_ ( ):
from timeit import timeit
print("Running performance benchmarks...")
lowercase__ : List[Any] = "from string import printable ; from __main__ import atbash, atbash_slow"
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=_lowerCamelCase)} seconds''')
print(f'''> atbash(): {timeit("atbash(printable)" , setup=_lowerCamelCase)} seconds''')
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"{example} encrypted in atbash: {atbash(example)}")
benchmark()
| 333 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case_ ( __A ):
__A : List[str] = "vit_mae"
def __init__( self : List[Any] , lowercase_ : List[Any]=7_68 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Optional[Any]=30_72 , lowercase_ : str="gelu" , lowercase_ : Tuple=0.0 , lowercase_ : int=0.0 , lowercase_ : Dict=0.02 , lowercase_ : int=1E-12 , lowercase_ : Tuple=2_24 , lowercase_ : Any=16 , lowercase_ : Dict=3 , lowercase_ : List[Any]=True , lowercase_ : Dict=16 , lowercase_ : List[str]=5_12 , lowercase_ : Tuple=8 , lowercase_ : Any=20_48 , lowercase_ : int=0.75 , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ) -> Optional[Any]:
super().__init__(**lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : str = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : List[Any] = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Any = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[int] = patch_size
lowercase__ : Any = num_channels
lowercase__ : str = qkv_bias
lowercase__ : Optional[Any] = decoder_num_attention_heads
lowercase__ : Any = decoder_hidden_size
lowercase__ : Any = decoder_num_hidden_layers
lowercase__ : Union[str, Any] = decoder_intermediate_size
lowercase__ : int = mask_ratio
lowercase__ : Tuple = norm_pix_loss
| 333 | 1 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowercase_ ( _lowerCamelCase : Union[dict, list, tuple, torch.Tensor]):
lowercase__ : Optional[Any] = []
if isinstance(_lowerCamelCase , _lowerCamelCase):
for v in tree.values():
shapes.extend(_fetch_dims(_lowerCamelCase))
elif isinstance(_lowerCamelCase , (list, tuple)):
for t in tree:
shapes.extend(_fetch_dims(_lowerCamelCase))
elif isinstance(_lowerCamelCase , torch.Tensor):
shapes.append(tree.shape)
else:
raise ValueError("Not supported")
return shapes
@torch.jit.ignore
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple[int, ...]):
lowercase__ : Optional[Any] = []
for d in reversed(_lowerCamelCase):
idx.append(flat_idx % d)
lowercase__ : Union[str, Any] = flat_idx // d
return tuple(reversed(_lowerCamelCase))
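# Worked example (assumption, for illustration): _flat_idx_to_idx(5, (2, 3))
# walks the dims right to left (5 % 3 = 2, then 1 % 2 = 1) and returns (1, 2),
# the row-major multi-index of flat position 5 in a 2 x 3 grid.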
@torch.jit.ignore
def lowercase_ ( _lowerCamelCase : Sequence[int] , _lowerCamelCase : Sequence[int] , _lowerCamelCase : Sequence[int] , _lowerCamelCase : Optional[Sequence[bool]] = None , _lowerCamelCase : Optional[Sequence[bool]] = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(_lowerCamelCase : List[bool]) -> None:
lowercase__ : Optional[int] = True
for i in range(len(_lowerCamelCase)):
lowercase__ : Dict = -1 * (i + 1)
l[reversed_idx] &= tally
lowercase__ : List[Any] = l[reversed_idx]
if start_edges is None:
lowercase__ : Union[str, Any] = [s == 0 for s in start]
reduce_edge_list(_lowerCamelCase)
if end_edges is None:
lowercase__ : str = [e == (d - 1) for e, d in zip(_lowerCamelCase , _lowerCamelCase)]
reduce_edge_list(_lowerCamelCase)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(_lowerCamelCase) == 0:
return [()]
elif len(_lowerCamelCase) == 1:
return [(slice(start[0] , end[0] + 1),)]
lowercase__ : List[Tuple[slice, ...]] = []
lowercase__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(_lowerCamelCase , _lowerCamelCase):
if s == e:
path_list.append(slice(_lowerCamelCase , s + 1))
else:
break
lowercase__ : Tuple[slice, ...] = tuple(_lowerCamelCase)
lowercase__ : List[str] = len(_lowerCamelCase)
# start == end, and we're done
if divergence_idx == len(_lowerCamelCase):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowercase__ : Optional[int] = start[divergence_idx]
return tuple(
path + (slice(_lowerCamelCase , sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowercase__ : int = end[divergence_idx]
return tuple(
path + (slice(_lowerCamelCase , edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
slices.extend(lower())
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper())
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper())
lowercase__ : Optional[int] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
slices.extend(lower())
return slices
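# Worked sketch (illustrative assumption): for start=(0, 1) and end=(1, 2) on a
# tensor with dims=(2, 4), neither index sits on an edge, so the function emits
# two ragged pieces: [(slice(0, 1), slice(1, 4)), (slice(1, 2), slice(0, 3))],
# i.e. row 0 from column 1 onwards plus row 1 up to column 2 inclusive.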
@torch.jit.ignore
def lowercase_ ( _lowerCamelCase : torch.Tensor , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int):
lowercase__ : Dict = t.shape[:no_batch_dims]
lowercase__ : List[Any] = list(_flat_idx_to_idx(_lowerCamelCase , _lowerCamelCase))
# _get_minimal_slice_set is inclusive
lowercase__ : int = list(_flat_idx_to_idx(flat_end - 1 , _lowerCamelCase))
# Get an ordered list of slices to perform
lowercase__ : List[str] = _get_minimal_slice_set(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
lowercase__ : int = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def lowercase_ ( _lowerCamelCase : Callable , _lowerCamelCase : Dict[str, Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : bool = False , _lowerCamelCase : Any = None , _lowerCamelCase : bool = False , ):
if not (len(_lowerCamelCase) > 0):
raise ValueError("Must provide at least one input")
lowercase__ : List[Any] = [shape[:no_batch_dims] for shape in _fetch_dims(_lowerCamelCase)]
lowercase__ : int = tuple([max(_lowerCamelCase) for s in zip(*_lowerCamelCase)])
def _prep_inputs(_lowerCamelCase : torch.Tensor) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
lowercase__ : str = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
lowercase__ : Any = t.reshape(-1 , *t.shape[no_batch_dims:])
else:
lowercase__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
return t
lowercase__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , _lowerCamelCase)
lowercase__ : str = None
if _out is not None:
lowercase__ : Dict = tensor_tree_map(lambda _lowerCamelCase: t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
lowercase__ : Tuple = 1
for d in orig_batch_dims:
flat_batch_dim *= d
lowercase__ : Dict = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(_lowerCamelCase : torch.Tensor) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
lowercase__ : Dict = 0
lowercase__ : List[Any] = prepped_outputs
for _ in range(_lowerCamelCase):
# Chunk the input
if not low_mem:
lowercase__ : int = _select_chunk
else:
lowercase__ : str = partial(
_chunk_slice , flat_start=_lowerCamelCase , flat_end=min(_lowerCamelCase , i + chunk_size) , no_batch_dims=len(_lowerCamelCase) , )
lowercase__ : Dict[str, Any] = tensor_tree_map(_lowerCamelCase , _lowerCamelCase)
# Run the layer on the chunk
lowercase__ : int = layer(**_lowerCamelCase)
# Allocate space for the output
if out is None:
lowercase__ : Union[str, Any] = tensor_tree_map(lambda _lowerCamelCase: t.new_zeros((flat_batch_dim,) + t.shape[1:]) , _lowerCamelCase)
# Put the chunk in its pre-allocated space
if isinstance(_lowerCamelCase , _lowerCamelCase):
def assign(_lowerCamelCase : dict , _lowerCamelCase : dict) -> None:
for k, v in da.items():
if isinstance(_lowerCamelCase , _lowerCamelCase):
assign(_lowerCamelCase , da[k])
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
lowercase__ : Dict = da[k]
assign(_lowerCamelCase , _lowerCamelCase)
elif isinstance(_lowerCamelCase , _lowerCamelCase):
for xa, xa in zip(_lowerCamelCase , _lowerCamelCase):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
lowercase__ : Optional[int] = xa
elif isinstance(_lowerCamelCase , torch.Tensor):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
lowercase__ : int = output_chunk
else:
raise ValueError("Not supported")
i += chunk_size
lowercase__ : Any = tensor_tree_map(lambda _lowerCamelCase: t.view(orig_batch_dims + t.shape[1:]) , _lowerCamelCase)
return out
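# Usage sketch (hedged; "chunk_layer" is the upstream name of the helper above):
# calling it with a layer, {"x": t}, chunk_size=4 and no_batch_dims=1 flattens
# the leading batch dimension of every input, runs the layer on successive
# 4-row slices, writes each chunk into a pre-allocated output, and finally
# restores the original batch shape.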
class snake_case_ :
def __init__( self : Optional[int] , lowercase_ : int = 5_12 , ) -> List[Any]:
lowercase__ : Optional[int] = max_chunk_size
lowercase__ : Optional[int] = None
lowercase__ : Optional[tuple] = None
def __UpperCamelCase ( self : List[str] , lowercase_ : Callable , lowercase_ : tuple , lowercase_ : int ) -> int:
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
lowercase__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
lowercase__ : List[Any] = [c for c in candidates if c > min_chunk_size]
lowercase__ : str = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(lowercase_ : int ) -> bool:
try:
with torch.no_grad():
fn(*lowercase_ , chunk_size=lowercase_ )
return True
except RuntimeError:
return False
lowercase__ : str = 0
lowercase__ : List[Any] = len(lowercase_ ) - 1
while i > min_viable_chunk_size_index:
lowercase__ : Tuple = test_chunk_size(candidates[i] )
if not viable:
lowercase__ : List[Any] = (min_viable_chunk_size_index + i) // 2
else:
lowercase__ : Tuple = i
lowercase__ : List[Any] = (i + len(lowercase_ ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def __UpperCamelCase ( self : str , lowercase_ : Iterable , lowercase_ : Iterable ) -> bool:
lowercase__ : Optional[Any] = True
for aa, aa in zip(lowercase_ , lowercase_ ):
assert type(lowercase_ ) == type(lowercase_ )
if isinstance(lowercase_ , (list, tuple) ):
consistent &= self._compare_arg_caches(lowercase_ , lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
lowercase__ : int = [v for _, v in sorted(aa.items() , key=lambda lowercase_ : x[0] )]
lowercase__ : str = [v for _, v in sorted(aa.items() , key=lambda lowercase_ : x[0] )]
consistent &= self._compare_arg_caches(lowercase_ , lowercase_ )
else:
consistent &= aa == aa
return consistent
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Callable , lowercase_ : tuple , lowercase_ : int , ) -> int:
lowercase__ : str = True
lowercase__ : tuple = tree_map(lambda lowercase_ : a.shape if isinstance(lowercase_ , torch.Tensor ) else a , lowercase_ , lowercase_ )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(lowercase_ )
lowercase__ : Optional[int] = self._compare_arg_caches(self.cached_arg_data , lowercase_ )
else:
            # No cached value yet, so a fresh tuning pass is required
lowercase__ : Optional[Any] = False
if not consistent:
lowercase__ : Tuple = self._determine_favorable_chunk_size(
lowercase_ , lowercase_ , lowercase_ , )
lowercase__ : int = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 333 | def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
while a != 0:
lowercase__ , lowercase__ : Dict = b % a, a
return b
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
if gcd(_lowerCamelCase , _lowerCamelCase) != 1:
lowercase__ : Tuple = f'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(_lowerCamelCase)
lowercase__ , lowercase__ , lowercase__ : Optional[int] = 1, 0, a
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = 0, 1, m
while va != 0:
lowercase__ : Tuple = ua // va
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
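# Worked example (illustrative): the modular inverse of 3 modulo 11 is 4, since
# 3 * 4 = 12 ≡ 1 (mod 11), so the extended-Euclid loop above returns 4; a call
# with a = 4, m = 8 raises ValueError because gcd(4, 8) != 1.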
| 333 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class snake_case_ ( unittest.TestCase ,__A ):
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
lowercase__ : Any = load_tool("text-to-speech" )
self.tool.setup()
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
lowercase__ : Any = self.tool("hey" )
lowercase__ : List[str] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
lowercase__ : List[str] = self.tool("hey" )
lowercase__ : int = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
| 333 | import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Tuple=100 , _lowerCamelCase : Tuple=" "):
lowercase__ : Union[str, Any] = text.split(_lowerCamelCase)
return [character.join(text[i : i + n]).strip() for i in range(0 , len(_lowerCamelCase) , _lowerCamelCase)]
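# Illustrative example (assumption): with n=2, splitting "a b c d e" yields
# ["a b", "c d", "e"]; with the default n=100 each document below is therefore
# cut into passages of at most 100 space-separated words.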
def lowercase_ ( _lowerCamelCase : dict):
lowercase__ , lowercase__ : List[str] = [], []
for title, text in zip(documents["title"] , documents["text"]):
if text is not None:
for passage in split_text(_lowerCamelCase):
titles.append(title if title is not None else "")
texts.append(_lowerCamelCase)
return {"title": titles, "text": texts}
def lowercase_ ( _lowerCamelCase : dict , _lowerCamelCase : DPRContextEncoder , _lowerCamelCase : DPRContextEncoderTokenizerFast):
lowercase__ : Union[str, Any] = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=_lowerCamelCase , padding="longest" , return_tensors="pt")["input_ids"]
lowercase__ : Any = ctx_encoder(input_ids.to(device=_lowerCamelCase) , return_dict=_lowerCamelCase).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
######################################
logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase__ : str = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase__ : List[Any] = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase__ : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=_lowerCamelCase)
lowercase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase__ : List[Any] = Features(
{"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}) # optional, save as float32 instead of float64 to save space
lowercase__ : List[Any] = dataset.map(
partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , )
# And finally save your dataset
lowercase__ : Optional[int] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset")
dataset.save_to_disk(_lowerCamelCase)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index("embeddings" , custom_index=_lowerCamelCase)
# And save the index
lowercase__ : Union[str, Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss")
dataset.get_index("embeddings").save(_lowerCamelCase)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class snake_case_ :
__A : str = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,)
__A : Optional[str] = field(
default=__A ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,)
__A : str = field(
default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,)
__A : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} ,)
__A : Optional[str] = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,)
@dataclass
class snake_case_ :
__A : Optional[int] = field(
default=__A ,metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} ,)
__A : int = field(
default=16 ,metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} ,)
@dataclass
class snake_case_ :
__A : int = field(
default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,)
__A : int = field(
default=128 ,metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
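# Illustrative follow-up (added; not part of the original script): querying the
# knowledge base built above. The checkpoint names are real DPR question-encoder
# checkpoints on the Hub, but treat the exact snippet as a hedged sketch.
#
# import os
# import torch
# from datasets import load_from_disk
# from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#
# dataset = load_from_disk(passages_path)                 # saved in Step 1
# dataset.load_faiss_index("embeddings", index_path)      # saved in Step 2
# q_tok = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# with torch.no_grad():
#     emb = q_enc(**q_tok("What does Moses' rod turn into ?", return_tensors="pt"))[0][0].numpy()
# scores, passages = dataset.get_nearest_examples("embeddings", emb, k=5)
# print(passages["title"])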
| 333 | 1 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
UpperCamelCase = logging.get_logger(__name__)
# General docstring
UpperCamelCase = '''PoolFormerConfig'''
# Base docstring
UpperCamelCase = '''sail/poolformer_s12'''
UpperCamelCase = [1, 512, 7, 7]
# Image classification docstring
UpperCamelCase = '''sail/poolformer_s12'''
UpperCamelCase = '''tabby, tabby cat'''
UpperCamelCase = [
'''sail/poolformer_s12''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : float = 0.0 , _lowerCamelCase : bool = False):
if drop_prob == 0.0 or not training:
return input
lowercase__ : Optional[int] = 1 - drop_prob
lowercase__ : List[Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
lowercase__ : Union[str, Any] = keep_prob + torch.rand(_lowerCamelCase , dtype=input.dtype , device=input.device)
random_tensor.floor_() # binarize
lowercase__ : Any = input.div(_lowerCamelCase) * random_tensor
return output
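# Added sanity check (hedged sketch, not in the original file): this function
# implements stochastic depth (Huang et al., 2016); `drop_path` is the name used at
# the call site below. Dividing the kept samples by keep_prob keeps the output mean
# unchanged in expectation:
#
# x = torch.ones(1000, 4)
# y = drop_path(x, drop_prob=0.2, training=True)
# assert y.shape == x.shape
# print(float(y.mean()))  # ~1.0; each row is either 0 or 1 / 0.8 = 1.25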
class snake_case_ ( nn.Module ):
def __init__( self : Tuple , lowercase_ : Optional[float] = None ) -> None:
super().__init__()
lowercase__ : Dict = drop_prob
def __UpperCamelCase ( self : Optional[int] , lowercase_ : torch.Tensor ) -> torch.Tensor:
return drop_path(lowercase_ , self.drop_prob , self.training )
def __UpperCamelCase ( self : Dict ) -> str:
return "p={}".format(self.drop_prob )
class snake_case_ ( nn.Module ):
def __init__( self : int , lowercase_ : Dict , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[int]=None ) -> Any:
super().__init__()
lowercase__ : Optional[int] = patch_size if isinstance(lowercase_ , collections.abc.Iterable ) else (patch_size, patch_size)
lowercase__ : Optional[int] = stride if isinstance(lowercase_ , collections.abc.Iterable ) else (stride, stride)
lowercase__ : int = padding if isinstance(lowercase_ , collections.abc.Iterable ) else (padding, padding)
lowercase__ : List[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=lowercase_ )
lowercase__ : Optional[Any] = norm_layer(lowercase_ ) if norm_layer else nn.Identity()
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Dict ) -> int:
lowercase__ : Any = self.projection(lowercase_ )
lowercase__ : Optional[Any] = self.norm(lowercase_ )
return embeddings
class snake_case_ ( nn.GroupNorm ):
def __init__( self : Union[str, Any] , lowercase_ : Any , **lowercase_ : List[str] ) -> Optional[Any]:
super().__init__(1 , lowercase_ , **lowercase_ )
class snake_case_ ( nn.Module ):
def __init__( self : int , lowercase_ : str ) -> Optional[int]:
super().__init__()
lowercase__ : Optional[Any] = nn.AvgPoolad(lowercase_ , stride=1 , padding=pool_size // 2 , count_include_pad=lowercase_ )
def __UpperCamelCase ( self : List[Any] , lowercase_ : Optional[Any] ) -> Tuple:
return self.pool(lowercase_ ) - hidden_states
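# Added note (not in the original): this is PoolFormer's "token mixer". It returns
# pool(x) - x, the average of each token's spatial neighborhood minus the token
# itself; the identity is restored by the residual connection in the layer below,
# so the net effect is cheap local smoothing instead of self-attention.
# A hedged shape check, assuming pool_size=3 as in the paper:
#
# mixer = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
# x = torch.randn(1, 64, 7, 7)
# assert (mixer(x) - x).shape == x.shape   # stride=1 plus padding keeps H and W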
class snake_case_ ( nn.Module ):
def __init__( self : List[str] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Any ) -> Dict:
super().__init__()
lowercase__ : List[Any] = nn.Convad(lowercase_ , lowercase_ , 1 )
lowercase__ : Dict = nn.Convad(lowercase_ , lowercase_ , 1 )
lowercase__ : int = PoolFormerDropPath(lowercase_ )
if isinstance(config.hidden_act , lowercase_ ):
lowercase__ : int = ACTaFN[config.hidden_act]
else:
lowercase__ : Union[str, Any] = config.hidden_act
def __UpperCamelCase ( self : int , lowercase_ : Optional[int] ) -> Optional[Any]:
lowercase__ : Union[str, Any] = self.conva(lowercase_ )
lowercase__ : Optional[int] = self.act_fn(lowercase_ )
lowercase__ : Optional[int] = self.drop(lowercase_ )
lowercase__ : Any = self.conva(lowercase_ )
lowercase__ : Any = self.drop(lowercase_ )
return hidden_states
class snake_case_ ( nn.Module ):
def __init__( self : List[Any] , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] ) -> Optional[int]:
super().__init__()
lowercase__ : Optional[int] = PoolFormerPooling(lowercase_ )
lowercase__ : Tuple = PoolFormerOutput(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = PoolFormerGroupNorm(lowercase_ )
lowercase__ : Union[str, Any] = PoolFormerGroupNorm(lowercase_ )
# Stochastic depth: randomly drops the whole residual branch during training
lowercase__ : Optional[Any] = PoolFormerDropPath(lowercase_ ) if drop_path > 0.0 else nn.Identity()
lowercase__ : Union[str, Any] = config.use_layer_scale
if config.use_layer_scale:
lowercase__ : List[str] = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowercase_) ) , requires_grad=lowercase_ )
lowercase__ : Union[str, Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowercase_) ) , requires_grad=lowercase_ )
def __UpperCamelCase ( self : List[str] , lowercase_ : Union[str, Any] ) -> int:
if self.use_layer_scale:
lowercase__ : Union[str, Any] = self.pooling(self.before_norm(lowercase_ ) )
lowercase__ : int = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
lowercase__ : Optional[Any] = hidden_states + self.drop_path(lowercase_ )
lowercase__ : Optional[Any] = ()
lowercase__ : int = self.output(self.after_norm(lowercase_ ) )
lowercase__ : List[str] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
lowercase__ : List[str] = hidden_states + self.drop_path(lowercase_ )
lowercase__ : Tuple = (output,) + outputs
return outputs
else:
lowercase__ : str = self.drop_path(self.pooling(self.before_norm(lowercase_ ) ) )
# First residual connection
lowercase__ : Any = pooling_output + hidden_states
lowercase__ : List[Any] = ()
# Second residual connection inside the PoolFormerOutput block
lowercase__ : Optional[Any] = self.drop_path(self.output(self.after_norm(lowercase_ ) ) )
lowercase__ : Optional[Any] = hidden_states + layer_output
lowercase__ : str = (output,) + outputs
return outputs
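# Added clarification (not in the original): with config.use_layer_scale=True the
# branch above computes, per sub-block,
#     x = x + drop_path(lambda_1 * pooling(norm_1(x)))
#     x = x + drop_path(lambda_2 * mlp(norm_2(x)))
# where lambda_1 / lambda_2 are learnable per-channel scales initialised to
# config.layer_scale_init_value (LayerScale, as in CaiT). Small initial scales let
# very deep stacks start close to the identity mapping.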
class snake_case_ ( nn.Module ):
def __init__( self : Optional[int] , lowercase_ : Union[str, Any] ) -> Tuple:
super().__init__()
lowercase__ : Optional[Any] = config
# stochastic depth decay rule
lowercase__ : int = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
lowercase__ : List[str] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
lowercase__ : List[str] = nn.ModuleList(lowercase_ )
# Transformer blocks
lowercase__ : Optional[int] = []
lowercase__ : Any = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
lowercase__ : List[str] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
lowercase_ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(lowercase_ ) )
lowercase__ : List[Any] = nn.ModuleList(lowercase_ )
def __UpperCamelCase ( self : int , lowercase_ : Tuple , lowercase_ : Optional[Any]=False , lowercase_ : Union[str, Any]=True ) -> Any:
lowercase__ : Any = () if output_hidden_states else None
lowercase__ : List[Any] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
lowercase__ , lowercase__ : int = layers
# Get patch embeddings from hidden_states
lowercase__ : List[str] = embedding_layer(lowercase_ )
# Send the embeddings through the blocks
for _, blk in enumerate(lowercase_ ):
lowercase__ : List[Any] = blk(lowercase_ )
lowercase__ : List[Any] = layer_outputs[0]
if output_hidden_states:
lowercase__ : int = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class snake_case_ ( __A ):
__A : Optional[Any] = PoolFormerConfig
__A : Tuple = "poolformer"
__A : Optional[Any] = "pixel_values"
__A : Optional[Any] = True
def __UpperCamelCase ( self : List[Any] , lowercase_ : List[Any] ) -> Optional[Any]:
if isinstance(lowercase_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowercase_ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def __UpperCamelCase ( self : List[Any] , lowercase_ : int , lowercase_ : List[Any]=False ) -> Tuple:
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : Dict = value
UpperCamelCase = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCamelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." ,__A ,)
class snake_case_ ( __A ):
def __init__( self : str , lowercase_ : str ) -> int:
super().__init__(lowercase_ )
lowercase__ : int = config
lowercase__ : Any = PoolFormerEncoder(lowercase_ )
# Initialize weights and apply final processing
self.post_init()
def __UpperCamelCase ( self : Dict ) -> List[str]:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
lowercase__ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
lowercase__ : str = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , )
lowercase__ : Dict = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
class snake_case_ ( nn.Module ):
def __init__( self : Union[str, Any] , lowercase_ : Optional[int] ) -> Tuple:
super().__init__()
lowercase__ : Union[str, Any] = nn.Linear(config.hidden_size , config.hidden_size )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : str ) -> List[str]:
lowercase__ : Optional[int] = self.dense(lowercase_ )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " ,__A ,)
class snake_case_ ( __A ):
def __init__( self : List[Any] , lowercase_ : List[Any] ) -> Any:
super().__init__(lowercase_ )
lowercase__ : Optional[Any] = config.num_labels
lowercase__ : Dict = PoolFormerModel(lowercase_ )
# Final norm
lowercase__ : int = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
lowercase__ : str = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCamelCase ( self : Dict , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
lowercase__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Union[str, Any] = self.poolformer(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , )
lowercase__ : Dict = outputs[0]
lowercase__ : Dict = self.classifier(self.norm(lowercase_ ).mean([-2, -1] ) )
lowercase__ : str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__ : int = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__ : Union[str, Any] = "single_label_classification"
else:
lowercase__ : Tuple = "multi_label_classification"
if self.config.problem_type == "regression":
lowercase__ : List[Any] = MSELoss()
if self.num_labels == 1:
lowercase__ : Any = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase__ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
lowercase__ : int = CrossEntropyLoss()
lowercase__ : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__ : Dict = BCEWithLogitsLoss()
lowercase__ : Any = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
lowercase__ : Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
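# Hedged usage sketch (added): end-to-end inference with the classification head.
# The checkpoint name matches the docstring constants above; treat the snippet as
# illustrative rather than the canonical example.
#
# from transformers import PoolFormerImageProcessor, PoolFormerForImageClassification
# from PIL import Image
# import requests
#
# image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
# processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
# model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
# logits = model(**processor(images=image, return_tensors="pt")).logits
# print(model.config.id2label[logits.argmax(-1).item()])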
| 333 | import argparse
import datetime
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = {
"0": "Sunday",
"1": "Monday",
"2": "Tuesday",
"3": "Wednesday",
"4": "Thursday",
"5": "Friday",
"6": "Saturday",
}
lowercase__ : Any = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(_lowerCamelCase) < 11:
raise ValueError("Must be 10 characters long")
# Get month
lowercase__ : int = int(date_input[0] + date_input[1])
# Validate
if not 0 < m < 13:
raise ValueError("Month must be between 1 - 12")
lowercase__ : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get day
lowercase__ : int = int(date_input[3] + date_input[4])
# Validate
if not 0 < d < 32:
raise ValueError("Date must be between 1 - 31")
# Get second separator
lowercase__ : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get year
lowercase__ : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"Year out of range. There has to be some sort of limit...right?")
# Get datetime obj for validation
lowercase__ : Union[str, Any] = datetime.date(int(_lowerCamelCase) , int(_lowerCamelCase) , int(_lowerCamelCase))
# Start math
if m <= 2:
lowercase__ : Optional[Any] = y - 1
lowercase__ : int = m + 12
# maths var
lowercase__ : int = int(str(_lowerCamelCase)[:2])
lowercase__ : int = int(str(_lowerCamelCase)[2:])
lowercase__ : int = int(2.6 * m - 5.39)
lowercase__ : int = int(c / 4)
lowercase__ : int = int(k / 4)
lowercase__ : int = int(d + k)
lowercase__ : int = int(t + u + v + x)
lowercase__ : int = int(z - (2 * c))
lowercase__ : int = round(w % 7)
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("The date was evaluated incorrectly. Contact developer.")
# Response
lowercase__ : str = f'''Your date {date_input}, is a {days[str(_lowerCamelCase)]}!'''
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
UpperCamelCase = parser.parse_args()
zeller(args.date_input)
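# Added worked example (hedged): `zeller` is the name used in the __main__ block for
# the function defined above. 31 Jan 2010 fell on a Sunday, so:
#
# >>> zeller("01-31-2010")
# 'Your date 01-31-2010, is a Sunday!'
#
# The internal AssertionError check against datetime guarantees the returned day
# agrees with the standard library.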
| 333 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCamelCase = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ) -> List[str]:
lowercase__ : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowercase__ : List[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowercase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ )
lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowercase_ , "w" , newline="\n" ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , "r" ) as f:
self.assertEqual(f.read() , lowercase_ )
def __UpperCamelCase ( self : str ) -> Optional[int]:
lowercase__ : Optional[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> str:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , )
# Copy consistency with a really long name
lowercase__ : Optional[int] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
| 333 | import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
UpperCamelCase = 4
UpperCamelCase = 3
class snake_case_ ( __A ):
pass
def lowercase_ ( _lowerCamelCase : List[str]):
for shard in shards:
for i in range(_lowerCamelCase):
yield {"i": i, "shard": shard}
def lowercase_ ( ):
lowercase__ : List[str] = int(os.environ["RANK"])
lowercase__ : Union[str, Any] = int(os.environ["WORLD_SIZE"])
lowercase__ : Union[str, Any] = ArgumentParser()
parser.add_argument("--streaming" , type=_lowerCamelCase)
parser.add_argument("--local_rank" , type=_lowerCamelCase)
parser.add_argument("--num_workers" , type=_lowerCamelCase , default=0)
lowercase__ : int = parser.parse_args()
lowercase__ : Union[str, Any] = args.streaming
lowercase__ : List[Any] = args.num_workers
lowercase__ : Dict = {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(_lowerCamelCase)]}
lowercase__ : int = IterableDataset.from_generator(_lowerCamelCase , gen_kwargs=_lowerCamelCase)
if not streaming:
lowercase__ : str = Dataset.from_list(list(_lowerCamelCase))
lowercase__ : List[str] = split_dataset_by_node(_lowerCamelCase , rank=_lowerCamelCase , world_size=_lowerCamelCase)
lowercase__ : Any = torch.utils.data.DataLoader(_lowerCamelCase , num_workers=_lowerCamelCase)
lowercase__ : Dict = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase__ : Any = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
lowercase__ : List[str] = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''')
if __name__ == "__main__":
main()
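# Added note: this test expects the RANK and WORLD_SIZE environment variables that a
# distributed launcher sets, e.g. (exact flags are illustrative):
#
#   torchrun --nproc_per_node=3 this_script.py --streaming True
#
# Each rank then verifies it received its share of the NUM_SHARDS * NUM_ITEMS_PER_SHARD
# = 4 * 3 = 12 examples, with any remainder going to the lowest ranks.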
| 333 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class snake_case_ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
lowercase__ : Dict = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
lowercase__ : str = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
lowercase__ : List[str] = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
lowercase__ : Tuple = torch.tensor(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowercase__ : Any = model(lowercase_ )["last_hidden_state"].detach()
self.assertEqual(output.shape , lowercase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , lowercase_ , atol=1E-3 ) )
@slow
def __UpperCamelCase ( self : Any ) -> Tuple:
lowercase__ : Dict = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
lowercase__ : int = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
lowercase__ : Any = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
lowercase__ : Tuple = torch.tensor(
[[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowercase__ : Union[str, Any] = model(lowercase_ )["last_hidden_state"].detach()
self.assertEqual(output.shape , lowercase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , lowercase_ , atol=1E-3 ) )
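# Added note: both methods are marked @slow, so the transformers test runner skips
# them by default; they are typically enabled with (exact command is an assumption):
#
#   RUN_SLOW=1 python -m pytest -k xlm_roberta
#
# The commented fairseq lines above document how the expected tensors were obtained.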
| 333 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case_ ( __A ):
__A : List[str] = "unispeech"
def __init__( self : List[Any] , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=1E-5 , lowercase_ : int="group" , lowercase_ : Tuple="gelu" , lowercase_ : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : int=False , lowercase_ : List[Any]=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Any=2 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=3_20 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=1_00 , lowercase_ : Dict=2_56 , lowercase_ : Optional[Any]=2_56 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=2_56 , lowercase_ : Union[str, Any]=80 , lowercase_ : int=0 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.5 , **lowercase_ : Union[str, Any] , ) -> Any:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : Any = feat_extract_norm
lowercase__ : Optional[Any] = feat_extract_activation
lowercase__ : Dict = list(lowercase_ )
lowercase__ : Union[str, Any] = list(lowercase_ )
lowercase__ : List[str] = list(lowercase_ )
lowercase__ : List[str] = conv_bias
lowercase__ : Any = num_conv_pos_embeddings
lowercase__ : Dict = num_conv_pos_embedding_groups
lowercase__ : int = len(self.conv_dim )
lowercase__ : str = num_hidden_layers
lowercase__ : Any = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : int = num_attention_heads
lowercase__ : Union[str, Any] = hidden_dropout
lowercase__ : Any = attention_dropout
lowercase__ : Union[str, Any] = activation_dropout
lowercase__ : Any = feat_proj_dropout
lowercase__ : str = final_dropout
lowercase__ : int = layerdrop
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : Any = num_ctc_classes
lowercase__ : int = vocab_size
lowercase__ : str = do_stable_layer_norm
lowercase__ : Any = use_weighted_layer_sum
lowercase__ : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : List[Any] = apply_spec_augment
lowercase__ : Dict = mask_time_prob
lowercase__ : Tuple = mask_time_length
lowercase__ : str = mask_time_min_masks
lowercase__ : List[Any] = mask_feature_prob
lowercase__ : int = mask_feature_length
lowercase__ : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ : Optional[int] = num_codevectors_per_group
lowercase__ : List[str] = num_codevector_groups
lowercase__ : Dict = contrastive_logits_temperature
lowercase__ : Tuple = feat_quantizer_dropout
lowercase__ : Any = num_negatives
lowercase__ : Dict = codevector_dim
lowercase__ : Tuple = proj_codevector_dim
lowercase__ : List[str] = diversity_loss_weight
# ctc loss
lowercase__ : Tuple = ctc_loss_reduction
lowercase__ : Dict = ctc_zero_infinity
# pretraining loss
lowercase__ : Optional[Any] = replace_prob
@property
def __UpperCamelCase ( self : Dict ) -> Tuple:
return functools.reduce(operator.mul , self.conv_stride , 1 )
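# Added worked example: this property (inputs_to_logits_ratio in the upstream source)
# multiplies the conv strides. With the default conv_stride=(5, 2, 2, 2, 2, 2, 2):
#
#   functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320
#
# i.e. the feature encoder emits one frame per 320 input samples, or every 20 ms of
# 16 kHz audio (320 / 16000 s).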
| 333 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class snake_case_ ( __A ):
__A : List[str] = "pegasus"
__A : List[str] = ["past_key_values"]
__A : int = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Tuple , lowercase_ : Optional[int]=5_02_65 , lowercase_ : List[str]=10_24 , lowercase_ : int=12 , lowercase_ : Union[str, Any]=40_96 , lowercase_ : Optional[Any]=16 , lowercase_ : Tuple=12 , lowercase_ : int=40_96 , lowercase_ : List[Any]=16 , lowercase_ : Any=0.0 , lowercase_ : List[Any]=0.0 , lowercase_ : Tuple=True , lowercase_ : Dict=True , lowercase_ : int="gelu" , lowercase_ : Any=10_24 , lowercase_ : Dict=0.1 , lowercase_ : Dict=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[Any]=0.02 , lowercase_ : List[str]=0 , lowercase_ : int=False , lowercase_ : List[str]=0 , lowercase_ : List[Any]=1 , lowercase_ : Optional[Any]=1 , **lowercase_ : Optional[int] , ) -> int:
lowercase__ : Optional[int] = vocab_size
lowercase__ : Union[str, Any] = max_position_embeddings
lowercase__ : Dict = d_model
lowercase__ : Tuple = encoder_ffn_dim
lowercase__ : Union[str, Any] = encoder_layers
lowercase__ : List[Any] = encoder_attention_heads
lowercase__ : Optional[int] = decoder_ffn_dim
lowercase__ : Optional[int] = decoder_layers
lowercase__ : Tuple = decoder_attention_heads
lowercase__ : Any = dropout
lowercase__ : Dict = attention_dropout
lowercase__ : List[Any] = activation_dropout
lowercase__ : Any = activation_function
lowercase__ : Optional[int] = init_std
lowercase__ : Any = encoder_layerdrop
lowercase__ : List[str] = decoder_layerdrop
lowercase__ : str = use_cache
lowercase__ : int = encoder_layers
lowercase__ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , )
@property
def __UpperCamelCase ( self : List[str] ) -> int:
return self.encoder_attention_heads
@property
def __UpperCamelCase ( self : Any ) -> int:
return self.d_model
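# Added sketch (hedged): the attribute_map above aliases the generic names onto the
# PEGASUS-specific ones, and the two properties expose them directly, so with the
# upstream defaults:
#
# config = PegasusConfig()
# assert config.num_attention_heads == config.encoder_attention_heads == 16
# assert config.hidden_size == config.d_model == 1024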
| 333 | def lowercase_ ( _lowerCamelCase : list):
for i in range(len(_lowerCamelCase) - 1 , 0 , -1):
lowercase__ : int = False
for j in range(_lowerCamelCase , 0 , -1):
if unsorted[j] < unsorted[j - 1]:
lowercase__ , lowercase__ : int = unsorted[j - 1], unsorted[j]
lowercase__ : List[str] = True
for j in range(_lowerCamelCase):
if unsorted[j] > unsorted[j + 1]:
lowercase__ , lowercase__ : Optional[int] = unsorted[j + 1], unsorted[j]
lowercase__ : Dict = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
UpperCamelCase = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
UpperCamelCase = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class snake_case_ ( __A ):
__A : Tuple = VOCAB_FILES_NAMES
__A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : List[str] = PRETRAINED_VOCAB_FILES_MAP
__A : List[str] = ["input_ids", "attention_mask"]
__A : Tuple = MBartTokenizer
__A : List[int] = []
__A : List[int] = []
def __init__( self : Optional[Any] , lowercase_ : Dict=None , lowercase_ : Dict=None , lowercase_ : Dict="<s>" , lowercase_ : Optional[Any]="</s>" , lowercase_ : str="</s>" , lowercase_ : List[Any]="<s>" , lowercase_ : Any="<unk>" , lowercase_ : int="<pad>" , lowercase_ : str="<mask>" , lowercase_ : Union[str, Any]=None , lowercase_ : List[str]=None , lowercase_ : Optional[int]=None , **lowercase_ : Optional[int] , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ : int = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
vocab_file=lowercase_ , tokenizer_file=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
lowercase__ : Any = vocab_file
lowercase__ : str = False if not self.vocab_file else True
lowercase__ : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowercase__ : Any = {
lang_code: self.convert_tokens_to_ids(lowercase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowercase__ : str = src_lang if src_lang is not None else "en_XX"
lowercase__ : List[str] = self.convert_tokens_to_ids(self._src_lang )
lowercase__ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCamelCase ( self : List[str] ) -> str:
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self : List[Any] , lowercase_ : str ) -> None:
lowercase__ : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
lowercase__ : List[str] = [self.sep_token_id]
lowercase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[str] , lowercase_ : Optional[str] , **lowercase_ : Union[str, Any] ) -> int:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowercase__ : Union[str, Any] = src_lang
lowercase__ : str = self(lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , **lowercase_ )
lowercase__ : str = self.convert_tokens_to_ids(lowercase_ )
lowercase__ : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : str = "en_XX" , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "ro_RO" , **lowercase_ : Tuple , ) -> BatchEncoding:
lowercase__ : Union[str, Any] = src_lang
lowercase__ : Dict = tgt_lang
return super().prepare_seqaseq_batch(lowercase_ , lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Any ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self : Dict ) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self : List[Any] , lowercase_ : Optional[int] ) -> None:
lowercase__ : List[str] = self.convert_tokens_to_ids(lowercase_ )
lowercase__ : int = []
lowercase__ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
lowercase__ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase__ : str = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase__ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase ( self : Dict , lowercase_ : str ) -> None:
lowercase__ : List[str] = self.convert_tokens_to_ids(lowercase_ )
lowercase__ : str = []
lowercase__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
lowercase__ : Dict = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase__ : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase__ : List[str] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCamelCase ( self : List[str] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
lowercase__ : Tuple = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
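# Hedged usage sketch (added): translating with the language-code machinery above.
# The checkpoint is a real Hub model, but treat the exact snippet as illustrative.
#
# from transformers import MBartForConditionalGeneration, MBartTokenizerFast
#
# tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
# batch = tok("UN Chief Says There Is No Plan to Stop Chemical Weapons in Syria", return_tensors="pt")
# generated = model.generate(**batch, decoder_start_token_id=tok.lang_code_to_id["ro_RO"])
# print(tok.batch_decode(generated, skip_special_tokens=True))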
| 333 | import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class snake_case_ ( __A ):
__A : int = "token-classification"
def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]:
if type(lowercase_ ) == dict:
lowercase__ : Dict = Namespace(**lowercase_ )
lowercase__ : str = import_module("tasks" )
try:
lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
lowercase__ : int = CrossEntropyLoss().ignore_index
super().__init__(lowercase_ , len(self.labels ) , self.mode )
def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any:
return self.model(**lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : Tuple = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don't use token_type_ids
lowercase__ : Optional[int] = self(**lowercase_ )
lowercase__ : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
lowercase__ : Tuple = self.hparams
for mode in ["train", "dev", "test"]:
lowercase__ : Any = self._feature_file(lowercase_ )
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ )
lowercase__ : Dict = self.token_classification_task.convert_examples_to_features(
lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , lowercase_ )
torch.save(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader:
lowercase__ : str = self._feature_file(lowercase_ )
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
lowercase__ : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK (we will stop using this soon)
lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str:
"""Compute validation""" ""
lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : int = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don't use token_type_ids
lowercase__ : List[Any] = self(**lowercase_ )
lowercase__ , lowercase__ : Any = outputs[:2]
lowercase__ : Optional[Any] = logits.detach().cpu().numpy()
lowercase__ : int = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]:
lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean()
lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 )
lowercase__ : Dict = np.argmax(lowercase_ , axis=2 )
lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowercase__ : Any = dict(enumerate(self.labels ) )
lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase__ : Any = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
"precision": precision_score(lowercase_ , lowercase_ ),
"recall": recall_score(lowercase_ , lowercase_ ),
"f1": fa_score(lowercase_ , lowercase_ ),
}
lowercase__ : List[Any] = dict(results.items() )
lowercase__ : List[str] = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict:
# when stable
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
lowercase__ : Any = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int:
# updating to test_epoch_end instead of deprecated test_end
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase__ : Optional[int] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple:
# Add NER specific options
BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
parser.add_argument(
"--task_type" , default="NER" , type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=lowercase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase = parser.parse_args()
UpperCamelCase = NERTransformer(args)
UpperCamelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCamelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
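# Added note: an illustrative invocation (the NER-specific flags come from
# add_model_specific_args above, the generic ones from lightning_base.add_generic_args;
# paths are placeholders):
#
#   python run_ner.py \
#     --data_dir ./data/conll2003 \
#     --labels ./data/conll2003/labels.txt \
#     --model_name_or_path bert-base-cased \
#     --task_type NER \
#     --max_seq_length 128 \
#     --output_dir ./ner-model \
#     --do_train --do_predict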
| 333 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class snake_case_ ( __A ):
__A : List[Any] = "marian"
__A : Dict = ["past_key_values"]
__A : Dict = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , lowercase_ : Any=5_81_01 , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[Any]=10_24 , lowercase_ : Tuple=12 , lowercase_ : Tuple=40_96 , lowercase_ : List[Any]=16 , lowercase_ : str=12 , lowercase_ : int=40_96 , lowercase_ : Optional[int]=16 , lowercase_ : List[Any]=0.0 , lowercase_ : str=0.0 , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=True , lowercase_ : Any="gelu" , lowercase_ : Dict=10_24 , lowercase_ : List[str]=0.1 , lowercase_ : List[str]=0.0 , lowercase_ : Dict=0.0 , lowercase_ : List[Any]=0.02 , lowercase_ : Tuple=5_81_00 , lowercase_ : Any=False , lowercase_ : List[Any]=5_81_00 , lowercase_ : Dict=0 , lowercase_ : Tuple=0 , lowercase_ : Union[str, Any]=True , **lowercase_ : List[Any] , ) -> Optional[int]:
lowercase__ : Dict = vocab_size
lowercase__ : int = decoder_vocab_size or vocab_size
lowercase__ : str = max_position_embeddings
lowercase__ : Optional[Any] = d_model
lowercase__ : List[Any] = encoder_ffn_dim
lowercase__ : Tuple = encoder_layers
lowercase__ : Union[str, Any] = encoder_attention_heads
lowercase__ : Optional[Any] = decoder_ffn_dim
lowercase__ : Optional[int] = decoder_layers
lowercase__ : Optional[int] = decoder_attention_heads
lowercase__ : int = dropout
lowercase__ : int = attention_dropout
lowercase__ : Optional[int] = activation_dropout
lowercase__ : List[Any] = activation_function
lowercase__ : Any = init_std
lowercase__ : int = encoder_layerdrop
lowercase__ : Dict = decoder_layerdrop
lowercase__ : Any = use_cache
lowercase__ : int = encoder_layers
lowercase__ : Any = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ : int = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , )
class snake_case_ ( __A ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __UpperCamelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowercase__ : Dict = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
lowercase__ : Tuple = {0: "batch"}
lowercase__ : int = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowercase__ : str = {0: "batch", 1: "decoder_sequence"}
lowercase__ : List[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase__ : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
lowercase__ , lowercase__ : List[Any] = self.num_layers
for i in range(lowercase_ ):
lowercase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
lowercase__ : str = {0: "batch", 2: "past_sequence + sequence"}
else:
lowercase__ : Tuple = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowercase__ : List[Any] = super().outputs
else:
lowercase__ : Union[str, Any] = super(lowercase_ , self ).outputs
if self.use_past:
lowercase__ , lowercase__ : List[Any] = self.num_layers
for i in range(lowercase_ ):
lowercase__ : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
lowercase__ : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
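# --- Hedged usage sketch (added for illustration; the checkpoint and config class names are assumptions) ---
# A seq2seq ONNX config of the shape above produces its export inputs straight from a tokenizer:
#
#   from transformers import AutoConfig, AutoTokenizer, TensorType
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/m2m100_418M")
#   onnx_config = SomeSeq2SeqOnnxConfig(AutoConfig.from_pretrained("facebook/m2m100_418M"), task="seq2seq-lm")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   # dummy holds input_ids, attention_mask and decoder_* tensors (plus past_key_values when use_past=True)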
| 333 | from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    '''configuration_mask2former''': [
        '''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''Mask2FormerConfig''',
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''image_processing_mask2former'''] = ['''Mask2FormerImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_mask2former'''] = [
        '''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Mask2FormerForUniversalSegmentation''',
        '''Mask2FormerModel''',
        '''Mask2FormerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
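# --- Hedged note (added) on the lazy-import pattern above ---
# _LazyModule defers the torch/vision imports until an attribute is first accessed,
# so `from transformers.models.mask2former import Mask2FormerConfig` stays cheap
# even in environments where the modeling code is never used.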
| 333 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
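# --- Hedged demo (added; not part of the original module) ---
# Exercises the nearest-centroid quantization path above with a made-up 4-colour palette.
if __name__ == "__main__":
    _palette = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    _image = np.random.rand(8, 8, 3)  # fake (height, width, 3) image in [0, 1]
    _ids = color_quantize(_image, _palette)  # shape (64,), values in {0, 1, 2, 3}
    print(_ids.reshape(8, 8))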
| 333 | # Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
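    # --- Hedged add-on (added; not in the original script) ---
    # Report training accuracy with a 0.5 decision threshold on the fitted model;
    # `x`, `y`, and `predict_prob` are the objects defined above.
    predictions = (predict_prob(x) >= 0.5).astype(int)
    print(f"training accuracy: {(predictions == y).mean():.3f}")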
| 333 | 1 |
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
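# --- Hedged example (added; derived from the rendering logic above) ---
# A results file of the shape
#   {"benchmarks/b.json": {"load": {"new": 1.5, "old": 2.0, "diff": -0.5}}}
# renders as a collapsible markdown section with one table per benchmark:
#   ### Benchmark: b.json
#   | metric | load |
#   |--------|---|
#   | new / old (diff) | 1.500000 / 2.000000 (-0.500000) |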
| 333 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
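# --- Hedged usage sketch (added; mirrors how datasets applies task templates) ---
# from datasets import ClassLabel, Features, Value
# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# task = TextClassification(text_column="text", label_column="labels")
# task = task.align_with_features(features)  # frozen copy whose label schema now carries the class names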
| 333 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 333 | def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be (lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
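# --- Hedged note (added) ---
# The loop above is plain bisection, so it needs at most ~log2(higher - lower) probes;
# e.g. guess_the_number(1, 1000, 374) prints each midpoint tried (500, 250, 375, ...)
# before ending on 374.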
| 333 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
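# --- Hedged sketch (added) of the sampling loop the test above exercises ---
# scheduler = IPNDMScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(10)
# for t in scheduler.timesteps:
#     residual = model(sample, t)  # any residual-predicting model
#     sample = scheduler.step(residual, t, sample).prev_sample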
| 333 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 333 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/table-transformer-detection''': (
        '''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
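# --- Hedged usage sketch (added) ---
# from transformers import TableTransformerConfig, TableTransformerForObjectDetection
# config = TableTransformerConfig(d_model=256, encoder_layers=6, decoder_layers=6)
# model = TableTransformerForObjectDetection(config)  # randomly initialised from the config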
| 333 | from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ", f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''', )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
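# --- Hedged usage note (added) ---
# A script like this is meant to be launched once per process, e.g.:
#   accelerate launch --num_processes 2 test_sync.py
# so every assertion above runs under each rank.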
| 333 | 1 |
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
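# --- Hedged demo (added; by Lagrange's four-square theorem the result is always <= 4) ---
if __name__ == "__main__":
    print(minimum_squares_to_represent_a_number(12))  # 3, since 12 = 4 + 4 + 4
    print(minimum_squares_to_represent_a_number(25))  # 1, since 25 = 5**2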
| 333 | import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
        layer_name = f'''layers_{str(layer_index)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
        layer_name = f'''layers_{str(layer_index)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ : List[str] = tax_mlp_wi_a
lowercase__ : List[Any] = tax_mlp_wi_a
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
        lowercase__ : Tuple = tax_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    lowercase__ : List[Any] = tax_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
    lowercase__ : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path)
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
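    # Example invocation (hypothetical paths; the flags are the ones defined above):
    #   python convert_t5x_checkpoint_to_flax.py \
    #       --t5x_checkpoint_path /path/to/t5x_checkpoint \
    #       --config_name google/t5-v1_1-base \
    #       --flax_dump_folder_path ./flax_dump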
| 333 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class snake_case_ ( __A ):
def __init__( self : Union[str, Any] , lowercase_ : Dict ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = data
def __iter__( self : Optional[Any] ) -> Tuple:
for element in self.data:
yield element
def lowercase_ ( _lowerCamelCase : Any=True):
lowercase__ : Union[str, Any] = Accelerator(even_batches=_lowerCamelCase)
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def lowercase_ ( _lowerCamelCase : Accelerator , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : bool = False):
if iterable:
lowercase__ : Tuple = DummyIterableDataset(torch.as_tensor(range(_lowerCamelCase)))
else:
lowercase__ : List[str] = TensorDataset(torch.as_tensor(range(_lowerCamelCase)))
lowercase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=_lowerCamelCase)
lowercase__ : str = accelerator.prepare(_lowerCamelCase)
return dl
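# Note on the helper above: with even_batches=True (the Accelerator default),
# prepare() shards the DataLoader so every process yields the same number of
# batches, typically padding by reusing samples; with even_batches=False the
# final batch counts and sizes may differ across processes, as verified below.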
def lowercase_ ( _lowerCamelCase : Accelerator , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[int] , _lowerCamelCase : List[int] , ):
lowercase__ : Tuple = create_dataloader(accelerator=_lowerCamelCase , dataset_size=_lowerCamelCase , batch_size=_lowerCamelCase)
lowercase__ : List[str] = [len(batch[0]) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def lowercase_ ( ):
lowercase__ : Tuple = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
_lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
_lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def lowercase_ ( ):
lowercase__ : Dict = create_accelerator(even_batches=_lowerCamelCase)
verify_dataloader_batch_sizes(
_lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
_lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def lowercase_ ( ):
lowercase__ : Union[str, Any] = create_accelerator(even_batches=_lowerCamelCase)
lowercase__ : Optional[int] = torch.nn.Linear(1 , 1)
lowercase__ : str = accelerator.prepare(_lowerCamelCase)
lowercase__ : Optional[Any] = create_dataloader(_lowerCamelCase , dataset_size=3 , batch_size=1)
lowercase__ : Tuple = []
with accelerator.join_uneven_inputs([ddp_model]):
for batch_idx, batch in enumerate(_lowerCamelCase):
lowercase__ : List[str] = ddp_model(batch[0].float())
lowercase__ : Union[str, Any] = output.sum()
loss.backward()
batch_idxs.append(_lowerCamelCase)
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def lowercase_ ( _lowerCamelCase : str):
with warnings.catch_warnings(record=_lowerCamelCase) as w:
with accelerator.join_uneven_inputs([Mock()]):
pass
assert issubclass(w[-1].category , _lowerCamelCase)
assert "only supported for multi-GPU" in str(w[-1].message)
def lowercase_ ( ):
lowercase__ : Union[str, Any] = True
lowercase__ : str = False
lowercase__ : Any = create_accelerator(even_batches=_lowerCamelCase)
lowercase__ : Union[str, Any] = torch.nn.Linear(1 , 1)
lowercase__ : List[Any] = accelerator.prepare(_lowerCamelCase)
lowercase__ : Tuple = create_dataloader(_lowerCamelCase , dataset_size=3 , batch_size=1)
lowercase__ : Dict = create_dataloader(_lowerCamelCase , dataset_size=3 , batch_size=1)
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_lowerCamelCase):
lowercase__ : Dict = train_dl.batch_sampler.even_batches
lowercase__ : Any = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def lowercase_ ( ):
lowercase__ : Optional[Any] = True
lowercase__ : List[str] = False
lowercase__ : Dict = create_accelerator(even_batches=_lowerCamelCase)
lowercase__ : Any = torch.nn.Linear(1 , 1)
lowercase__ : List[Any] = accelerator.prepare(_lowerCamelCase)
create_dataloader(_lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=_lowerCamelCase)
lowercase__ : Optional[int] = create_dataloader(_lowerCamelCase , dataset_size=3 , batch_size=1)
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_lowerCamelCase):
lowercase__ : Union[str, Any] = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def lowercase_ ( ):
lowercase__ : Optional[int] = create_accelerator()
lowercase__ : Any = torch.nn.Linear(1 , 1)
lowercase__ : Union[str, Any] = accelerator.prepare(_lowerCamelCase)
create_dataloader(_lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=_lowerCamelCase)
with warnings.catch_warnings(record=_lowerCamelCase) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_lowerCamelCase):
pass
assert issubclass(w[-1].category , _lowerCamelCase)
assert "only supported for map-style datasets" in str(w[-1].message)
def lowercase_ ( ):
lowercase__ : str = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes")
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled")
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs")
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs")
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types")
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning")
lowercase__ : Optional[Any] = accelerator.state.distributed_type
lowercase__ : Union[str, Any] = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(_lowerCamelCase)
lowercase__ : Dict = original_state
if __name__ == "__main__":
main()
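    # This script expects exactly two processes (see the assert in the accelerator
    # factory above). A typical launch would be (script name is hypothetical):
    #   accelerate launch --num_processes 2 test_even_batches.py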
| 333 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
__A : Optional[int] = "rwkv"
__A : List[str] = {"max_position_embeddings": "context_length"}
def __init__( self : Dict , lowercase_ : List[Any]=5_02_77 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=40_96 , lowercase_ : int=32 , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Any=1E-5 , lowercase_ : Optional[Any]=0 , lowercase_ : Any=0 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=False , lowercase_ : int=True , **lowercase_ : List[str] , ) -> int:
lowercase__ : List[str] = vocab_size
lowercase__ : str = context_length
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase__ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowercase__ : List[Any] = layer_norm_epsilon
lowercase__ : str = rescale_every
lowercase__ : Optional[int] = use_cache
lowercase__ : int = bos_token_id
lowercase__ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
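# Minimal usage sketch (pseudocode: upstream the class is RwkvConfig, while this
# file's class name is mangled):
#   config = RwkvConfig(vocab_size=50277, context_length=2048)
#   # attention_hidden_size defaults to hidden_size and intermediate_size to
#   # 4 * hidden_size when left as None, per the fallbacks in __init__ above.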
| 333 | 1 |
import warnings
warnings.warn(
    '''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
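# The replacement import recommended by the warning above:
#   from accelerate import find_executable_batch_size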
| 333 | class snake_case_ :
def __init__( self : int ) -> Optional[int]:
lowercase__ : Optional[int] = 0
lowercase__ : List[str] = 0
lowercase__ : Any = {}
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]:
if vertex not in self.adjacency:
lowercase__ : List[Any] = {}
self.num_vertices += 1
def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]:
self.add_vertex(lowercase_ )
self.add_vertex(lowercase_ )
if head == tail:
return
lowercase__ : int = weight
lowercase__ : Any = weight
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : List[Any] = self.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : int = edge
edges.remove((tail, head, weight) )
for i in range(len(lowercase_ ) ):
lowercase__ : Tuple = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
for i in range(len(lowercase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowercase__ : int = edges[i][2] + 1
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge
lowercase__ : Union[str, Any] = weight
lowercase__ : Dict = weight
def __str__( self : str ) -> Any:
lowercase__ : str = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase__ : Optional[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Any = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : List[str] ) -> Dict:
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]:
lowercase__ : Any = Graph()
if vertices is None:
lowercase__ : str = []
if edges is None:
lowercase__ : List[Any] = []
for vertex in vertices:
g.add_vertex(lowercase_ )
for edge in edges:
g.add_edge(*lowercase_ )
return g
class snake_case_ :
def __init__( self : int ) -> List[str]:
lowercase__ : Dict = {}
lowercase__ : Tuple = {}
def __len__( self : Union[str, Any] ) -> Union[str, Any]:
return len(self.parent )
def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple:
if item in self.parent:
return self.find(lowercase_ )
lowercase__ : Union[str, Any] = item
lowercase__ : int = 0
return item
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any:
if item not in self.parent:
return self.make_set(lowercase_ )
if item != self.parent[item]:
lowercase__ : Union[str, Any] = self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]:
lowercase__ : Dict = self.find(lowercase_ )
lowercase__ : Optional[int] = self.find(lowercase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowercase__ : Dict = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowercase__ : int = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowercase__ : Tuple = roota
return roota
return None
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]:
lowercase__ : List[Any] = graph.num_vertices
lowercase__ : Optional[Any] = Graph.UnionFind()
lowercase__ : int = []
while num_components > 1:
lowercase__ : List[Any] = {}
for vertex in graph.get_vertices():
lowercase__ : Any = -1
lowercase__ : List[str] = graph.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : List[str] = edge
lowercase__ : List[str] = union_find.find(lowercase_ )
lowercase__ : Union[str, Any] = union_find.find(lowercase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : int = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : Dict = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex]
if union_find.find(lowercase_ ) != union_find.find(lowercase_ ):
union_find.union(lowercase_ , lowercase_ )
mst_edges.append(cheap_edge[vertex] )
lowercase__ : Optional[Any] = num_components - 1
lowercase__ : List[Any] = Graph.build(edges=lowercase_ )
return mst
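# Usage sketch for the Boruvka MST above (pseudocode: class and method names are
# mangled in this file; edges are (tail, head, weight) triples):
#   g = Graph.build(vertices=[1, 2, 3], edges=[(1, 2, 1), (2, 3, 2), (1, 3, 3)])
#   mst = Graph.boruvka_mst(g)  # the last @staticmethod above
#   print(mst)                  # one "head -> tail == weight" line per MST edge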
| 333 | 1 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowercase_ ( _lowerCamelCase : Dict):
lowercase__ : int = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Optional[int]):
lowercase__ , lowercase__ : int = emb.weight.shape
lowercase__ : List[str] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase)
lowercase__ : Dict = emb.weight.data
return lin_layer
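# The helper above ties the LM head to the token embeddings: the returned Linear's
# weight shares storage with emb.weight instead of copying it. A quick sanity
# check (hypothetical shapes; `make_linear_from_emb` is the upstream name):
#   emb = nn.Embedding(10, 4)
#   head = make_linear_from_emb(emb)
#   assert head.weight.data_ptr() == emb.weight.data_ptr()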
def lowercase_ ( _lowerCamelCase : Optional[Any]):
lowercase__ : int = torch.load(_lowerCamelCase , map_location="cpu")
lowercase__ : List[Any] = mam_aaa["args"] or mam_aaa["cfg"]["model"]
lowercase__ : int = mam_aaa["model"]
remove_ignore_keys_(_lowerCamelCase)
lowercase__ : str = state_dict["encoder.embed_tokens.weight"].shape[0]
lowercase__ : Any = MaMaaaConfig(
vocab_size=_lowerCamelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
lowercase__ : Optional[int] = state_dict["decoder.embed_tokens.weight"]
lowercase__ : int = MaMaaaForConditionalGeneration(_lowerCamelCase)
model.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase)
lowercase__ : Optional[Any] = make_linear_from_emb(model.model.shared)
return model
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
UpperCamelCase = parser.parse_args()
    UpperCamelCase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
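    # Example invocation (hypothetical paths; both arguments are positional, as
    # defined above):
    #   python convert_m2m100_original_checkpoint_to_pytorch.py /path/to/model.pt ./m2m100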
| 333 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = DPTConfig()
if "large" in checkpoint_url:
lowercase__ : str = 1024
lowercase__ : List[str] = 4096
lowercase__ : List[Any] = 24
lowercase__ : Dict = 16
lowercase__ : Union[str, Any] = [5, 11, 17, 23]
lowercase__ : Any = [256, 512, 1024, 1024]
lowercase__ : Optional[int] = (1, 384, 384)
if "ade" in checkpoint_url:
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = 150
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : str = "ade20k-id2label.json"
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r"))
lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = [1, 150, 480, 480]
return config, expected_shape
def lowercase_ ( _lowerCamelCase : List[Any]):
lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder")
if "pretrained.model" in name:
lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings")
if "patch_embed" in name:
lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings")
if "pos_embed" in name:
lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings")
if "attn.proj" in name:
lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense")
if "proj" in name and "project" not in name:
lowercase__ : int = name.replace("proj" , "projection")
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layer")
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense")
if "mlp.fc2" in name:
lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense")
if "norm1" in name:
lowercase__ : List[str] = name.replace("norm1" , "layernorm_before")
if "norm2" in name:
lowercase__ : Dict = name.replace("norm2" , "layernorm_after")
if "scratch.output_conv" in name:
lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head")
if "scratch" in name:
lowercase__ : str = name.replace("scratch" , "neck")
if "layer1_rn" in name:
lowercase__ : int = name.replace("layer1_rn" , "convs.0")
if "layer2_rn" in name:
lowercase__ : int = name.replace("layer2_rn" , "convs.1")
if "layer3_rn" in name:
lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2")
if "layer4_rn" in name:
lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3")
if "refinenet" in name:
lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''')
if "out_conv" in name:
lowercase__ : str = name.replace("out_conv" , "projection")
if "resConfUnit1" in name:
lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1")
if "resConfUnit2" in name:
lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2")
if "conv1" in name:
lowercase__ : List[Any] = name.replace("conv1" , "convolution1")
if "conv2" in name:
lowercase__ : Tuple = name.replace("conv2" , "convolution2")
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0")
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0")
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0")
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0")
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection")
if "pretrained.act_postprocess1.4" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize")
if "pretrained.act_postprocess2.3" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection")
if "pretrained.act_postprocess2.4" in name:
lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize")
if "pretrained.act_postprocess3.3" in name:
lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection")
if "pretrained.act_postprocess4.3" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection")
if "pretrained.act_postprocess4.4" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize")
if "pretrained" in name:
lowercase__ : Any = name.replace("pretrained" , "dpt")
if "bn" in name:
lowercase__ : str = name.replace("bn" , "batch_norm")
if "head" in name:
lowercase__ : Optional[Any] = name.replace("head" , "head.head")
if "encoder.norm" in name:
lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm")
if "auxlayer" in name:
lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head")
return name
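# Worked example of the rename chain above, traced through the replacements:
#   "pretrained.model.blocks.0.attn.proj.weight"
#     -> "dpt.encoder.blocks.0.attn.proj.weight"                (encoder prefix)
#     -> "dpt.encoder.blocks.0.attention.output.dense.weight"   (attn.proj)
#     -> "dpt.encoder.layer.0.attention.output.dense.weight"    (blocks -> layer)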
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str):
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size]
lowercase__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : int = in_proj_bias[-config.hidden_size :]
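# The slices above split timm's fused qkv projection, of shape
# (3 * hidden_size, hidden_size), into thirds: rows [0, hidden_size) are the
# query, [hidden_size, 2 * hidden_size) the key, and the last hidden_size rows
# the value (and likewise for the bias vector).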
def lowercase_ ( ):
lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict):
lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase)
# load original state_dict from URL
lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu")
# remove certain keys
remove_ignore_keys_(_lowerCamelCase)
# rename keys
for key in state_dict.copy().keys():
lowercase__ : List[str] = state_dict.pop(_lowerCamelCase)
lowercase__ : List[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase)
# load HuggingFace model
lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase)
model.load_state_dict(_lowerCamelCase)
model.eval()
# Check outputs on an image
lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384
lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase)
lowercase__ : List[str] = prepare_img()
lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt")
# forward pass
lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth
# Assert logits
lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
if "ade" in checkpoint_url:
lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
assert outputs.shape == torch.Size(_lowerCamelCase)
assert (
torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase)
)
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
print(f'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowerCamelCase)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowerCamelCase)
if push_to_hub:
print("Pushing model to hub...")
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
UpperCamelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
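    # Example invocation (hypothetical dump path; --checkpoint_url defaults to the
    # DPT-large MiDaS weights declared above):
    #   python convert_dpt_to_pytorch.py --pytorch_dump_folder_path ./dpt-large --model_name dpt-large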
| 333 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
UpperCamelCase = '''bert-base-cased'''
UpperCamelCase = '''fp16'''
UpperCamelCase = '''bf16'''
UpperCamelCase = [FPaa, BFaa]
@require_fsdp
@require_cuda
class snake_case_ ( __A ):
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
super().setUp()
lowercase__ : Union[str, Any] = dict(
ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(lowercase_ ):
lowercase__ : int = self.dist_env.copy()
lowercase__ : Dict = F'''{i + 1}'''
lowercase__ : str = strategy
with mockenv_context(**lowercase_ ):
lowercase__ : Tuple = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(lowercase_ ):
lowercase__ : int = self.dist_env.copy()
lowercase__ : Any = prefetch_policy
with mockenv_context(**lowercase_ ):
lowercase__ : Tuple = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def __UpperCamelCase ( self : int ) -> List[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(lowercase_ ):
lowercase__ : Union[str, Any] = self.dist_env.copy()
lowercase__ : List[Any] = state_dict_type
with mockenv_context(**lowercase_ ):
lowercase__ : str = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def __UpperCamelCase ( self : Any ) -> Any:
lowercase__ : int = AutoModel.from_pretrained(lowercase_ )
for policy in FSDP_AUTO_WRAP_POLICY:
lowercase__ : str = self.dist_env.copy()
lowercase__ : int = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowercase__ : Tuple = "BertLayer"
elif policy == "SIZE_BASED_WRAP":
lowercase__ : List[Any] = "2000"
with mockenv_context(**lowercase_ ):
lowercase__ : Dict = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowercase_ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
lowercase__ : Optional[int] = self.dist_env.copy()
lowercase__ : int = "TRANSFORMER_BASED_WRAP"
lowercase__ : List[Any] = "T5Layer"
with mockenv_context(**lowercase_ ):
lowercase__ : List[Any] = FullyShardedDataParallelPlugin()
with self.assertRaises(lowercase_ ) as cm:
fsdp_plugin.set_auto_wrap_policy(lowercase_ )
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
lowercase__ : Any = self.dist_env.copy()
lowercase__ : str = "SIZE_BASED_WRAP"
lowercase__ : Tuple = "0"
with mockenv_context(**lowercase_ ):
lowercase__ : Any = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowercase_ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowercase__ : str = self.dist_env.copy()
lowercase__ : Dict = mp_dtype
with mockenv_context(**lowercase_ ):
lowercase__ : Union[str, Any] = Accelerator()
if mp_dtype == "fp16":
lowercase__ : int = torch.floataa
elif mp_dtype == "bf16":
lowercase__ : Union[str, Any] = torch.bfloataa
lowercase__ : Tuple = MixedPrecision(param_dtype=lowercase_ , reduce_dtype=lowercase_ , buffer_dtype=lowercase_ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , lowercase_ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , lowercase_ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(lowercase_ )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowercase__ : Any = self.dist_env.copy()
lowercase__ : List[str] = str(lowercase_ ).lower()
with mockenv_context(**lowercase_ ):
lowercase__ : List[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=lowercase_ ) )
@require_fsdp
@require_multi_gpu
@slow
class snake_case_ ( __A ):
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
super().setUp()
lowercase__ : str = 0.82
lowercase__ : List[Any] = [
"fsdp_shard_grad_op_transformer_based_wrap",
"fsdp_full_shard_transformer_based_wrap",
]
lowercase__ : int = {
"multi_gpu_fp16": 32_00,
"fsdp_shard_grad_op_transformer_based_wrap_fp16": 20_00,
"fsdp_full_shard_transformer_based_wrap_fp16": 19_00,
            # Disabling the test below as it exhausts RAM
            # on the CI self-hosted runner, leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
lowercase__ : Optional[Any] = 1_60
lowercase__ : int = 1_60
lowercase__ : Optional[Any] = inspect.getfile(accelerate.test_utils )
lowercase__ : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] )
def __UpperCamelCase ( self : Tuple ) -> int:
lowercase__ : Union[str, Any] = os.path.join(self.test_scripts_folder , "test_performance.py" )
lowercase__ : Union[str, Any] = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
for config in self.performance_configs:
lowercase__ : Optional[int] = cmd.copy()
for i, strategy in enumerate(lowercase_ ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("--mixed_precision=no" )
else:
cmd_config.append("--mixed_precision=fp16" )
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
def __UpperCamelCase ( self : str ) -> Optional[int]:
lowercase__ : Tuple = os.path.join(self.test_scripts_folder , "test_checkpointing.py" )
lowercase__ : Union[str, Any] = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--use_fsdp",
"--mixed_precision=fp16",
"--fsdp_transformer_layer_cls_to_wrap=BertLayer",
]
for i, strategy in enumerate(lowercase_ ):
lowercase__ : List[Any] = cmd.copy()
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
lowercase__ : Any = len(lowercase_ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
lowercase__ : Tuple = cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
"--partial_train_epoch=1",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
lowercase__ : Tuple = cmd_config[:-1]
lowercase__ : Dict = os.path.join(self.tmpdir , "epoch_0" )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
def __UpperCamelCase ( self : List[str] ) -> int:
lowercase__ : List[str] = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py" )
lowercase__ : int = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
lowercase__ : Union[str, Any] = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"] )
else:
cmd_config.extend(["--mixed_precision=no"] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"] )
for i, strategy in enumerate(lowercase_ ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
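# Each test above shells out via `accelerate launch ... --use_fsdp <script>` with
# two processes; the class is gated by @require_fsdp, @require_multi_gpu and
# @slow, so it only runs on multi-GPU runners with slow tests enabled.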
| 333 | def lowercase_ ( numerator : int = 1 , digit : int = 1000):
lowercase__ : Union[str, Any] = 1
lowercase__ : int = 0
    for divide_by_number in range(numerator , digit + 1):
lowercase__ : list[int] = []
lowercase__ : Dict = numerator
for _ in range(1 , digit + 1):
if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    lowercase__ : Union[str, Any] = len(has_been_divided)
lowercase__ : Optional[int] = divide_by_number
else:
                has_been_divided.append(now_divide)
lowercase__ : Optional[Any] = now_divide * 10 % divide_by_number
return the_digit
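# Worked example: for d = 7, 1/7 = 0.(142857) repeats with cycle length 6, the
# longest for any d <= 10, so numerator=1 and digit=10 yield 7. The inner loop
# tracks long-division remainders; seeing a remainder twice marks the cycle.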
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
__A : Optional[Any] = LongformerTokenizer
__A : Dict = True
__A : Any = LongformerTokenizerFast
__A : str = True
def __UpperCamelCase ( self : List[Any] ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Optional[Any] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowercase__ : List[str] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : int = {"unk_token": "<unk>"}
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
def __UpperCamelCase ( self : str , **lowercase_ : List[Any] ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def __UpperCamelCase ( self : Tuple , **lowercase_ : List[str] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Optional[Any]:
lowercase__ : Tuple = "lower newer"
lowercase__ : Union[str, Any] = "lower newer"
return input_text, output_text
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
lowercase__ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Dict = "lower newer"
lowercase__ : Optional[Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
lowercase__ : str = tokenizer.tokenize(lowercase_ ) # , add_prefix_space=True)
self.assertListEqual(lowercase_ , lowercase_ )
lowercase__ : int = tokens + [tokenizer.unk_token]
lowercase__ : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
lowercase__ : List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowercase_ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowercase_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCamelCase ( self : Any ) -> Dict:
lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
lowercase__ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowercase_ )
lowercase__ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowercase_ )
lowercase__ : str = tokenizer.encode(
"sequence builders" , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase__ : Any = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase__ : int = tokenizer.build_inputs_with_special_tokens(lowercase_ )
lowercase__ : Any = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : str = "Encode this sequence."
lowercase__ : List[str] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
lowercase__ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase__ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
lowercase__ : Any = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase_ , lowercase_ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
lowercase__ : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
lowercase__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
# Testing spaces after special tokens
lowercase__ : str = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ )} ) # mask token has a left space
lowercase__ : int = tokenizer.convert_tokens_to_ids(lowercase_ )
lowercase__ : Tuple = "Encode <mask> sequence"
lowercase__ : Optional[Any] = "Encode <mask>sequence"
lowercase__ : Optional[int] = tokenizer.encode(lowercase_ )
lowercase__ : Union[str, Any] = encoded.index(lowercase_ )
lowercase__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase_ , lowercase_ )
lowercase__ : List[Any] = tokenizer.encode(lowercase_ )
lowercase__ : str = encoded.index(lowercase_ )
lowercase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Any ) -> Any:
pass
def __UpperCamelCase ( self : Any ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : Tuple = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : List[str] = "A, <mask> AllenNLP sentence."
lowercase__ : Dict = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
lowercase__ : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowercase__ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase__ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowercase_ )
self.assertEqual(post_processor_state["add_prefix_space"] , lowercase_ )
self.assertEqual(post_processor_state["trim_offsets"] , lowercase_ )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : Union[str, Any] = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase__ : Tuple = F'''{text_of_1_token} {text_of_1_token}'''
lowercase__ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Tuple = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : Tuple = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Optional[int] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : str = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : str = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : Any = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase__ : Any = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Optional[Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ) + 1, 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : Any = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : str = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : str = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Dict = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
| 333 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ):
__A : int = StableUnCLIPPipeline
__A : int = TEXT_TO_IMAGE_PARAMS
__A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__A : int = False
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : str = 32
lowercase__ : Any = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : Any = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase_ , num_layers=1 , )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowercase__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ )
lowercase__ : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , )
torch.manual_seed(0 )
lowercase__ : Any = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL()
lowercase__ : List[Any] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __UpperCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Dict=0 ) -> Any:
if str(lowercase_ ).startswith("mps" ):
lowercase__ : Any = torch.manual_seed(lowercase_ )
else:
lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase__ : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
lowercase__ : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
lowercase__ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowercase_ )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowercase__ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : Dict = pipe("anime turle" , generator=lowercase_ , output_type="np" )
lowercase__ : Optional[int] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowercase__ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : str = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowercase__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 1 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case_ :
def __init__( self : int , lowercase_ : Any , lowercase_ : List[str]=13 , lowercase_ : List[str]=30 , lowercase_ : Any=2 , lowercase_ : Any=3 , lowercase_ : Tuple=True , lowercase_ : int=True , lowercase_ : Dict=32 , lowercase_ : Any=5 , lowercase_ : List[Any]=4 , lowercase_ : int=37 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[int]=0.02 , lowercase_ : Dict=3 , lowercase_ : Dict=0.6 , lowercase_ : Optional[Any]=None , ) -> int:
lowercase__ : str = parent
lowercase__ : Tuple = batch_size
lowercase__ : Any = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : List[Any] = num_channels
lowercase__ : List[Any] = is_training
lowercase__ : Tuple = use_labels
lowercase__ : str = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Any = num_attention_heads
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : Optional[int] = initializer_range
lowercase__ : Tuple = mask_ratio
lowercase__ : Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
# (we add 1 for the [CLS] token)
lowercase__ : Optional[int] = (image_size // patch_size) ** 2
lowercase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
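# Quick sanity check of the formula above using this tester's defaults (image_size=30,
# patch_size=2, mask_ratio=0.6); illustrative only, not part of the test logic:
#   num_patches = (30 // 2) ** 2 = 225
#   seq_length  = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91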
def __UpperCamelCase ( self : Dict ) -> int:
lowercase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] ) -> Optional[Any]:
lowercase__ : str = ViTMAEModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : str = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Tuple ) -> Tuple:
lowercase__ : Tuple = ViTMAEForPreTraining(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : Tuple = model(lowercase_ )
lowercase__ : int = (self.image_size // self.patch_size) ** 2
lowercase__ : Any = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ : Any = 1
lowercase__ : int = ViTMAEForPreTraining(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Dict = model(lowercase_ )
lowercase__ : str = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
lowercase__ : List[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : int = config_and_inputs
lowercase__ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ ( __A ,__A ,unittest.TestCase ):
__A : str = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
__A : Tuple = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
__A : List[Any] = False
__A : Any = False
__A : List[Any] = False
__A : int = False
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
lowercase__ : Optional[Any] = ViTMAEModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
pass
def __UpperCamelCase ( self : List[Any] ) -> Any:
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def __UpperCamelCase ( self : int ) -> int:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(lowercase_ )
lowercase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase_ )
def __UpperCamelCase ( self : List[Any] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Tuple ) -> List[str]:
# make masks reproducible
np.random.seed(2 )
lowercase__ : List[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowercase__ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : Any = torch.from_numpy(lowercase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ : Dict = pt_noise
super().check_pt_tf_models(lowercase_ , lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase__ : Union[str, Any] = outputs[0].cpu().numpy()
lowercase__ : Tuple = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_ )
lowercase__ : Dict = model_class.from_pretrained(lowercase_ )
model.to(lowercase_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
# Make sure we don't have nans
lowercase__ : int = after_outputs[0].cpu().numpy()
lowercase__ : List[Any] = 0
lowercase__ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase_ , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase ( self : Tuple ) -> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase ( self : List[str] ) -> Any:
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase ( self : Any ) -> Any:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@slow
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = ViTMAEModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowercase_ ( ):
lowercase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
# make random mask reproducible across the PT and TF models
np.random.seed(2 )
lowercase__ : Tuple = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(lowercase_ )
lowercase__ : int = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Optional[int] = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
# prepare a noise vector that will also be used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ : int = ViTMAEConfig()
lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ : str = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowercase__ : Optional[int] = model(**lowercase_ , noise=torch.from_numpy(lowercase_ ).to(device=lowercase_ ) )
# verify the logits
lowercase__ : Tuple = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase__ : List[Any] = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowercase_ ) , atol=1E-4 ) )
| 333 | import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int=False):
try:
lowercase__ : str = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase__ : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
lowercase__ : Union[str, Any] = strtobool(_lowerCamelCase)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''')
return _value
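# Minimal usage sketch for the env-flag helper above (illustrative values):
#   RUN_SLOW=yes   -> parse_flag_from_env("RUN_SLOW", default=False) returns 1 (truthy, via strtobool)
#   RUN_SLOW unset -> the `default` argument is returned unchanged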
UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skip("Test was skipped")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict):
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=None):
if test_case is None:
return partial(_lowerCamelCase , version=_lowerCamelCase)
return unittest.skipUnless(is_torch_version(">=" , _lowerCamelCase) , f'''test requires torch version >= {version}''')(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase)
UpperCamelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase)
class snake_case_ ( unittest.TestCase ):
__A : int = True
@classmethod
def __UpperCamelCase ( cls : str ) -> str:
lowercase__ : str = tempfile.mkdtemp()
@classmethod
def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCamelCase ( self : str ) -> Optional[int]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowercase_ )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str:
lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Tuple = AcceleratorState()
lowercase__ : Optional[int] = tensor[None].clone().to(state.device)
lowercase__ : Optional[int] = gather(_lowerCamelCase).cpu()
lowercase__ : Optional[Any] = tensor[0].cpu()
for i in range(tensors.shape[0]):
if not torch.equal(tensors[i] , _lowerCamelCase):
return False
return True
class snake_case_ :
def __init__( self : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int ) -> Union[str, Any]:
lowercase__ : int = returncode
lowercase__ : Dict = stdout
lowercase__ : List[Any] = stderr
async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str):
while True:
lowercase__ : int = await stream.readline()
if line:
callback(_lowerCamelCase)
else:
break
async def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=False):
if echo:
print("\nRunning: " , " ".join(_lowerCamelCase))
lowercase__ : str = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, we will need to switch to the following code. The problem is that no data
# will be seen until the process is done, so if it hangs there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase__ : Tuple = []
lowercase__ : List[Any] = []
def tee(_lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=""):
lowercase__ : Optional[int] = line.decode("utf-8").rstrip()
sink.append(_lowerCamelCase)
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:"))),
asyncio.create_task(_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:"))),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=180 , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=True):
lowercase__ : Optional[Any] = asyncio.get_event_loop()
lowercase__ : List[Any] = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase))
lowercase__ : str = " ".join(_lowerCamelCase)
if result.returncode > 0:
lowercase__ : Dict = "\n".join(result.stderr)
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''')
return result
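# Usage sketch for the synchronous runner above (named `execute_subprocess_async` in the
# upstream accelerate source); the command shown is a hypothetical example:
#   cmd = [sys.executable, "-c", "print('hello')"]
#   result = execute_subprocess_async(cmd, env=os.environ.copy())
#   assert result.returncode == 0 and "hello" in result.stdout[0]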
class snake_case_ ( __A ):
pass
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any=False):
try:
lowercase__ : Optional[int] = subprocess.check_output(_lowerCamelCase , stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(_lowerCamelCase , "decode"):
lowercase__ : Optional[Any] = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{" ".join(_lowerCamelCase)}` failed with the following error:\n\n{e.output.decode()}''') from e
| 333 | 1 |
| 333 | from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : int , **lowercase_ : List[str] ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Any:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Any ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Dict , *lowercase_ : str , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ) -> List[str]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Any ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Tuple:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Dict:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> int:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[Any] = ["flax"]
def __init__( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : int ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Any , *lowercase_ : int , **lowercase_ : int ) -> Optional[int]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : str ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[int] = ["flax"]
def __init__( self : Any , *lowercase_ : str , **lowercase_ : Dict ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : int ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[str] = ["flax"]
def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
| 333 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case_ ( __A ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Optional[int] = SMALL_MODEL_IDENTIFIER
lowercase__ : int = "pt"
lowercase__ : Optional[int] = "tf"
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Any ) -> List[str]:
lowercase__ : Union[str, Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(lowercase_ )
def __UpperCamelCase ( self : Tuple , lowercase_ : Any ) -> Dict:
lowercase__ : Optional[int] = TFAutoModel.from_pretrained(self.test_model , from_pt=lowercase_ )
model_tf.save_pretrained(lowercase_ )
def __UpperCamelCase ( self : List[str] ) -> Any:
lowercase__ : List[Any] = "mock_framework"
# Framework provided - return whatever the user provides
lowercase__ : List[str] = FeaturesManager.determine_framework(self.test_model , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowercase_ )
lowercase__ : List[Any] = FeaturesManager.determine_framework(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowercase_ )
lowercase__ : Optional[Any] = FeaturesManager.determine_framework(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowercase_ )
lowercase__ : Optional[int] = FeaturesManager.determine_framework(lowercase_ )
self.assertEqual(lowercase_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowercase_ )
lowercase__ : List[str] = FeaturesManager.determine_framework(lowercase_ )
self.assertEqual(lowercase_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(lowercase_ ):
lowercase__ : str = FeaturesManager.determine_framework(lowercase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
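# TensorFlow not in environment -> use PyTorch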
lowercase__ : Union[str, Any] = MagicMock(return_value=lowercase_ )
with patch("transformers.onnx.features.is_tf_available" , lowercase_ ):
lowercase__ : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowercase_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowercase__ : Dict = MagicMock(return_value=lowercase_ )
with patch("transformers.onnx.features.is_torch_available" , lowercase_ ):
lowercase__ : List[str] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowercase_ , self.framework_tf )
# Both in environment -> use PyTorch
lowercase__ : str = MagicMock(return_value=lowercase_ )
lowercase__ : Union[str, Any] = MagicMock(return_value=lowercase_ )
with patch("transformers.onnx.features.is_tf_available" , lowercase_ ), patch(
"transformers.onnx.features.is_torch_available" , lowercase_ ):
lowercase__ : int = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowercase_ , self.framework_pt )
# Both not in environment -> raise error
lowercase__ : Optional[Any] = MagicMock(return_value=lowercase_ )
lowercase__ : Union[str, Any] = MagicMock(return_value=lowercase_ )
with patch("transformers.onnx.features.is_tf_available" , lowercase_ ), patch(
"transformers.onnx.features.is_torch_available" , lowercase_ ):
with self.assertRaises(lowercase_ ):
lowercase__ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
| 333 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case_ ( __A ):
__A : List[str] = "vit_mae"
def __init__( self : List[Any] , lowercase_ : List[Any]=7_68 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Optional[Any]=30_72 , lowercase_ : str="gelu" , lowercase_ : Tuple=0.0 , lowercase_ : int=0.0 , lowercase_ : Dict=0.02 , lowercase_ : int=1E-12 , lowercase_ : Tuple=2_24 , lowercase_ : Any=16 , lowercase_ : Dict=3 , lowercase_ : List[Any]=True , lowercase_ : Dict=16 , lowercase_ : List[str]=5_12 , lowercase_ : Tuple=8 , lowercase_ : Any=20_48 , lowercase_ : int=0.75 , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ) -> Optional[Any]:
super().__init__(**lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : str = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : List[Any] = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Any = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[int] = patch_size
lowercase__ : Any = num_channels
lowercase__ : str = qkv_bias
lowercase__ : Optional[Any] = decoder_num_attention_heads
lowercase__ : Any = decoder_hidden_size
lowercase__ : Any = decoder_num_hidden_layers
lowercase__ : Union[str, Any] = decoder_intermediate_size
lowercase__ : int = mask_ratio
lowercase__ : Tuple = norm_pix_loss
| 333 | 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
lowercase__ : Dict = os.getenv("SM_HP_MP_PARAMETERS" , "{}")
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
lowercase__ : Optional[Any] = json.loads(_lowerCamelCase)
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
lowercase__ : List[str] = os.getenv("SM_FRAMEWORK_PARAMS" , "{}")
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
lowercase__ : List[Any] = json.loads(_lowerCamelCase)
if not mpi_options.get("sagemaker_mpi_enabled" , _lowerCamelCase):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class snake_case_ ( __A ):
__A : str = field(
default="" ,metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} ,)
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , lowercase_ , )
@cached_property
def __UpperCamelCase ( self : Dict ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
lowercase__ : List[Any] = torch.device("cpu" )
lowercase__ : Optional[Any] = 0
elif is_sagemaker_model_parallel_available():
lowercase__ : Tuple = smp.local_rank()
lowercase__ : Union[str, Any] = torch.device("cuda" , lowercase_ )
lowercase__ : Dict = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
lowercase__ : List[Any] = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
lowercase__ : Tuple = torch.device("cuda" , self.local_rank )
lowercase__ : Any = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowercase__ : Any = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the post-init has not been run before we end up here, so we just check that we're not at
# the default value.
lowercase__ : Optional[int] = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
lowercase__ : Dict = torch.device("cuda" , self.local_rank )
lowercase__ : Tuple = 1
if device.type == "cuda":
torch.cuda.set_device(lowercase_ )
return device
@property
def __UpperCamelCase ( self : Tuple ) -> List[str]:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def __UpperCamelCase ( self : str ) -> str:
return not is_sagemaker_model_parallel_available()
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
return False
| 333 | def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
while a != 0:
lowercase__ , lowercase__ : Dict = b % a, a
return b
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
if gcd(_lowerCamelCase , _lowerCamelCase) != 1:
lowercase__ : Tuple = f'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(_lowerCamelCase)
lowercase__ , lowercase__ , lowercase__ : Optional[int] = 1, 0, a
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = 0, 1, m
while va != 0:
lowercase__ : Tuple = ua // va
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
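# Worked example for the two helpers above (values checked by hand):
#   gcd(48, 18) -> 6, via the remainder chain 48 = 2*18 + 12, 18 = 1*12 + 6, 12 = 2*6 + 0
#   mod inverse of 7 (mod 26) -> 15, since 7 * 15 = 105 = 4 * 26 + 1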
| 333 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''sentencepiece.model'''}
UpperCamelCase = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
UpperCamelCase = {
'''google/rembert''': 256,
}
class snake_case_ ( __A ):
__A : Optional[int] = VOCAB_FILES_NAMES
__A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , lowercase_ : Any , lowercase_ : Any=False , lowercase_ : Any=True , lowercase_ : Dict=True , lowercase_ : Optional[Any]="[CLS]" , lowercase_ : Tuple="[SEP]" , lowercase_ : Tuple="[UNK]" , lowercase_ : Tuple="[SEP]" , lowercase_ : List[Any]="[PAD]" , lowercase_ : Optional[Any]="[CLS]" , lowercase_ : List[str]="[MASK]" , **lowercase_ : List[str] , ) -> List[str]:
super().__init__(
do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , **lowercase_ , )
lowercase__ : Dict = do_lower_case
lowercase__ : Union[str, Any] = remove_space
lowercase__ : Union[str, Any] = keep_accents
lowercase__ : Tuple = vocab_file
lowercase__ : Any = spm.SentencePieceProcessor()
self.sp_model.Load(lowercase_ )
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
return len(self.sp_model )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
lowercase__ : List[Any] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> Tuple:
lowercase__ : Optional[Any] = self.__dict__.copy()
lowercase__ : int = None
return state
def __setstate__( self : Optional[int] , lowercase_ : Any ) -> List[str]:
lowercase__ : Dict = d
lowercase__ : Dict = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Any , lowercase_ : Any=False ) -> Union[str, Any]:
lowercase__ : Optional[Any] = self.sp_model.EncodeAsPieces(lowercase_ )
return pieces
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[int] ) -> Any:
return self.sp_model.PieceToId(lowercase_ )
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[int] ) -> str:
return self.sp_model.IdToPiece(lowercase_ )
def __UpperCamelCase ( self : int , lowercase_ : List[str] ) -> List[str]:
lowercase__ : str = self.sp_model.decode_pieces(lowercase_ )
return out_string
def __UpperCamelCase ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
lowercase__ : Optional[int] = [self.sep_token_id]
lowercase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1]
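# Illustrative layout of the mask built above for a sequence pair (A, B):
#   tokens: [CLS] A1 .. An [SEP] B1 .. Bm [SEP]
#   mask:     1   0  .. 0    1   0  .. 0    1    (1 marks a special token)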
def __UpperCamelCase ( self : List[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
lowercase__ : List[str] = [self.sep_token_id]
lowercase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : Optional[int] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) )
return
lowercase__ : Dict = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
| 333 | import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Tuple=100 , _lowerCamelCase : Tuple=" "):
lowercase__ : Union[str, Any] = text.split(_lowerCamelCase)
return [character.join(text[i : i + n]).strip() for i in range(0 , len(_lowerCamelCase) , _lowerCamelCase)]
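# Illustrative behaviour of the word splitter above:
#   split_text("a b c d e", n=2) -> ["a b", "c d", "e"]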
def lowercase_ ( _lowerCamelCase : dict):
lowercase__ , lowercase__ : List[str] = [], []
for title, text in zip(documents["title"] , documents["text"]):
if text is not None:
for passage in split_text(_lowerCamelCase):
titles.append(title if title is not None else "")
texts.append(_lowerCamelCase)
return {"title": titles, "text": texts}
def lowercase_ ( _lowerCamelCase : dict , _lowerCamelCase : DPRContextEncoder , _lowerCamelCase : DPRContextEncoderTokenizerFast):
lowercase__ : Union[str, Any] = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=_lowerCamelCase , padding="longest" , return_tensors="pt")["input_ids"]
lowercase__ : Any = ctx_encoder(input_ids.to(device=_lowerCamelCase) , return_dict=_lowerCamelCase).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
######################################
logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase__ : str = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase__ : List[Any] = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase__ : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=_lowerCamelCase)
lowercase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase__ : List[Any] = Features(
{"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}) # optional, save as float32 instead of float64 to save space
lowercase__ : List[Any] = dataset.map(
partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , )
# And finally save your dataset
lowercase__ : Optional[int] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset")
dataset.save_to_disk(_lowerCamelCase)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index("embeddings" , custom_index=_lowerCamelCase)
# And save the index
lowercase__ : Union[str, Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss")
dataset.get_index("embeddings").save(_lowerCamelCase)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class snake_case_ :
__A : str = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,)
__A : Optional[str] = field(
default=__A ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,)
__A : str = field(
default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,)
__A : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} ,)
__A : Optional[str] = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,)
@dataclass
class snake_case_ :
__A : Optional[int] = field(
default=__A ,metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} ,)
__A : int = field(
default=16 ,metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} ,)
@dataclass
class snake_case_ :
__A : int = field(
default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,)
__A : int = field(
default=128 ,metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
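# A minimal, hedged sketch of how the saved passages and index could be consumed,
# kept commented out like the reload hints above. The retriever kwargs follow the
# documented custom-index pattern, the paths are the ones written by main(), and
# the field name `rag_model_name` is assumed from the dataclass help text:
#
# from transformers import RagRetriever, RagSequenceForGeneration, RagTokenizer
#
# retriever = RagRetriever.from_pretrained(
#     rag_example_args.rag_model_name,
#     index_name="custom",
#     passages_path=os.path.join(rag_example_args.output_dir, "my_knowledge_dataset"),
#     index_path=os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss"),
# )
# tokenizer = RagTokenizer.from_pretrained(rag_example_args.rag_model_name)
# model = RagSequenceForGeneration.from_pretrained(rag_example_args.rag_model_name, retriever=retriever)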
| 333 | 1 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class snake_case_ ( __A ):
__A : int = "token-classification"
def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]:
if type(lowercase_ ) == dict:
lowercase__ : Dict = Namespace(**lowercase_ )
lowercase__ : str = import_module("tasks" )
try:
lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
lowercase__ : int = CrossEntropyLoss().ignore_index
super().__init__(lowercase_ , len(self.labels ) , self.mode )
def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any:
return self.model(**lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : Tuple = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don't use token_type_ids
lowercase__ : Optional[int] = self(**lowercase_ )
lowercase__ : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
lowercase__ : Tuple = self.hparams
for mode in ["train", "dev", "test"]:
lowercase__ : Any = self._feature_file(lowercase_ )
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ )
lowercase__ : Dict = self.token_classification_task.convert_examples_to_features(
lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , lowercase_ )
torch.save(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader:
lowercase__ : str = self._feature_file(lowercase_ )
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
lowercase__ : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK (we will not need this for much longer)
lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str:
"""Compute validation""" ""
lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : int = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don't use token_type_ids
lowercase__ : List[Any] = self(**lowercase_ )
lowercase__ , lowercase__ : Any = outputs[:2]
lowercase__ : Optional[Any] = logits.detach().cpu().numpy()
lowercase__ : int = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]:
lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean()
lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 )
lowercase__ : Dict = np.argmax(lowercase_ , axis=2 )
lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowercase__ : Any = dict(enumerate(self.labels ) )
lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase__ : Any = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
"precision": precision_score(lowercase_ , lowercase_ ),
"recall": recall_score(lowercase_ , lowercase_ ),
"f1": fa_score(lowercase_ , lowercase_ ),
}
lowercase__ : List[Any] = dict(results.items() )
lowercase__ : List[str] = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict:
        # TODO: update to validation_epoch_end once stable (see the test_epoch_end note below)
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
lowercase__ : Any = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int:
# updating to test_epoch_end instead of deprecated test_end
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase__ : Optional[int] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple:
# Add NER specific options
BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
parser.add_argument(
"--task_type" , default="NER" , type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=lowercase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase = parser.parse_args()
UpperCamelCase = NERTransformer(args)
UpperCamelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCamelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 333 | import argparse
import datetime
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = {
"0": "Sunday",
"1": "Monday",
"2": "Tuesday",
"3": "Wednesday",
"4": "Thursday",
"5": "Friday",
"6": "Saturday",
}
lowercase__ : Any = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
    if len(_lowerCamelCase) != 10:
        raise ValueError("Must be 10 characters long")
# Get month
lowercase__ : int = int(date_input[0] + date_input[1])
# Validate
if not 0 < m < 13:
raise ValueError("Month must be between 1 - 12")
lowercase__ : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get day
lowercase__ : int = int(date_input[3] + date_input[4])
# Validate
if not 0 < d < 32:
raise ValueError("Date must be between 1 - 31")
# Get second separator
lowercase__ : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get year
lowercase__ : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"Year out of range. There has to be some sort of limit...right?")
# Get datetime obj for validation
lowercase__ : Union[str, Any] = datetime.date(int(_lowerCamelCase) , int(_lowerCamelCase) , int(_lowerCamelCase))
# Start math
if m <= 2:
lowercase__ : Optional[Any] = y - 1
lowercase__ : int = m + 12
# maths var
lowercase__ : int = int(str(_lowerCamelCase)[:2])
lowercase__ : int = int(str(_lowerCamelCase)[2:])
lowercase__ : int = int(2.6 * m - 5.39)
lowercase__ : int = int(c / 4)
lowercase__ : int = int(k / 4)
lowercase__ : int = int(d + k)
lowercase__ : int = int(t + u + v + x)
lowercase__ : int = int(z - (2 * c))
lowercase__ : int = round(w % 7)
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("The date was evaluated incorrectly. Contact developer.")
# Response
lowercase__ : str = f'''Your date {date_input}, is a {days[str(_lowerCamelCase)]}!'''
return response
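# Hedged sanity checks (assuming the function is exposed as `zeller`, as in the
# __main__ block below; both dates were cross-checked against datetime):
#   zeller("01-31-2010") -> "Your date 01-31-2010, is a Sunday!"
#   zeller("02/29/2012") -> "Your date 02/29/2012, is a Wednesday!"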
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
UpperCamelCase = parser.parse_args()
zeller(args.date_input)
| 333 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
UpperCamelCase = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
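# One hypothetical key walk-through of the renaming tables above (the exact fairseq
# key names depend on the checkpoint): a fairseq weight such as
#   "encoder.layers.3.self_attn.k_proj.weight"
# matches the "self_attn.k_proj" entry of MAPPING, the "*" is filled in with the
# layer index, and, because the target is not in TOP_LEVEL_KEYS, it is prefixed to
# "unispeech.encoder.layers.3.attention.k_proj.weight" in the Hugging Face model
# (see recursively_load_weights below).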
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int]):
for attribute in key.split("."):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowercase__ : int = "lm_head"
lowercase__ : Dict = getattr(_lowerCamelCase , _lowerCamelCase)
if weight_type is not None:
lowercase__ : Dict = getattr(_lowerCamelCase , _lowerCamelCase).shape
else:
lowercase__ : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase__ : int = value
elif weight_type == "weight_g":
lowercase__ : List[Any] = value
elif weight_type == "weight_v":
lowercase__ : Optional[Any] = value
elif weight_type == "bias":
lowercase__ : Union[str, Any] = value
else:
lowercase__ : str = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int):
lowercase__ : Union[str, Any] = []
lowercase__ : List[str] = fairseq_model.state_dict()
lowercase__ : Union[str, Any] = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
lowercase__ : Tuple = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ : Union[str, Any] = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
lowercase__ : Optional[Any] = True
if "*" in mapped_key:
lowercase__ : Dict = name.split(_lowerCamelCase)[0].split(".")[-2]
lowercase__ : List[Any] = mapped_key.replace("*" , _lowerCamelCase)
if "weight_g" in name:
lowercase__ : Union[str, Any] = "weight_g"
elif "weight_v" in name:
lowercase__ : Dict = "weight_v"
elif "bias" in name:
lowercase__ : Any = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ : Dict = "weight"
else:
lowercase__ : Tuple = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
continue
if not is_used:
unused_weights.append(_lowerCamelCase)
logger.warning(f'''Unused weights: {unused_weights}''')
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str]):
lowercase__ : Tuple = full_name.split("conv_layers.")[-1]
lowercase__ : List[str] = name.split(".")
lowercase__ : Optional[Any] = int(items[0])
lowercase__ : Tuple = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase__ : int = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase__ : List[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase__ : str = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase__ : Any = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
else:
unused_weights.append(_lowerCamelCase)
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Tuple=True):
if config_path is not None:
lowercase__ : Dict = UniSpeechConfig.from_pretrained(_lowerCamelCase)
else:
lowercase__ : Dict = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowercase__ : List[Any] = Dictionary.load_from_json(_lowerCamelCase)
            # important: change the bos & pad token ids, since the CTC blank symbol is <pad>
            # and not <s> as in fairseq
lowercase__ : Union[str, Any] = target_dict.pad_index
lowercase__ : List[Any] = target_dict.bos_index
lowercase__ : Tuple = target_dict.eos_index
lowercase__ : List[Any] = len(target_dict.symbols)
lowercase__ : Any = os.path.join(_lowerCamelCase , "vocab.json")
if not os.path.isdir(_lowerCamelCase):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase))
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase)
lowercase__ : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase__ : str = 42
lowercase__ : Optional[Any] = 43
with open(_lowerCamelCase , "w" , encoding="utf-8") as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase)
lowercase__ : Union[str, Any] = WavaVecaPhonemeCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
lowercase__ : Tuple = True if config.feat_extract_norm == "layer" else False
lowercase__ : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
lowercase__ : str = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase)
processor.save_pretrained(_lowerCamelCase)
lowercase__ : List[str] = UniSpeechForCTC(_lowerCamelCase)
else:
lowercase__ : Dict = UniSpeechForPreTraining(_lowerCamelCase)
if is_finetuned:
lowercase__ , lowercase__ , lowercase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path})
else:
lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
lowercase__ : List[Any] = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
hf_unispeech.save_pretrained(_lowerCamelCase)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCamelCase = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 333 | import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
UpperCamelCase = 4
UpperCamelCase = 3
class snake_case_ ( __A ):
pass
def lowercase_ ( _lowerCamelCase : List[str]):
for shard in shards:
for i in range(_lowerCamelCase):
yield {"i": i, "shard": shard}
def lowercase_ ( ):
lowercase__ : List[str] = int(os.environ["RANK"])
lowercase__ : Union[str, Any] = int(os.environ["WORLD_SIZE"])
lowercase__ : Union[str, Any] = ArgumentParser()
parser.add_argument("--streaming" , type=_lowerCamelCase)
parser.add_argument("--local_rank" , type=_lowerCamelCase)
parser.add_argument("--num_workers" , type=_lowerCamelCase , default=0)
lowercase__ : int = parser.parse_args()
lowercase__ : Union[str, Any] = args.streaming
lowercase__ : List[Any] = args.num_workers
lowercase__ : Dict = {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(_lowerCamelCase)]}
lowercase__ : int = IterableDataset.from_generator(_lowerCamelCase , gen_kwargs=_lowerCamelCase)
if not streaming:
lowercase__ : str = Dataset.from_list(list(_lowerCamelCase))
lowercase__ : List[str] = split_dataset_by_node(_lowerCamelCase , rank=_lowerCamelCase , world_size=_lowerCamelCase)
lowercase__ : Any = torch.utils.data.DataLoader(_lowerCamelCase , num_workers=_lowerCamelCase)
lowercase__ : Dict = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase__ : Any = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
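    # Hedged worked example of the bookkeeping above: with NUM_SHARDS=4 and
    # NUM_ITEMS_PER_SHARD=3, full_size is 12; for world_size=8 each rank expects
    # 12 // 8 = 1 item plus one extra for ranks 0-3 (the remainder), i.e. per-rank
    # sizes [2, 2, 2, 2, 1, 1, 1, 1], which sum back to 12.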
lowercase__ : List[str] = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''')
if __name__ == "__main__":
main()
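# Hypothetical launch command (the script name and flag values are placeholders;
# RANK and WORLD_SIZE are set by the launcher):
#   torchrun --nproc_per_node=2 test_dataset_distributed.py --streaming True --num_workers 2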
| 333 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_ ( __A ):
def __init__( self : Dict , lowercase_ : int , lowercase_ : Dict=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : List[str]=True , lowercase_ : List[Any]=True , lowercase_ : List[Any]=True , lowercase_ : Optional[int]=True , lowercase_ : Any=True , lowercase_ : Optional[int]=False , lowercase_ : str=False , lowercase_ : Any=False , lowercase_ : int=2 , lowercase_ : Any=99 , lowercase_ : List[Any]=0 , lowercase_ : Optional[Any]=32 , lowercase_ : Tuple=5 , lowercase_ : Tuple=4 , lowercase_ : int=0.1 , lowercase_ : int=0.1 , lowercase_ : Any=5_12 , lowercase_ : Tuple=12 , lowercase_ : List[Any]=2 , lowercase_ : List[Any]=0.02 , lowercase_ : str=3 , lowercase_ : Tuple=4 , lowercase_ : int="last" , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=None , ) -> Union[str, Any]:
lowercase__ : Optional[Any] = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Optional[Any] = seq_length
lowercase__ : Tuple = is_training
lowercase__ : Optional[int] = use_input_lengths
lowercase__ : Union[str, Any] = use_token_type_ids
lowercase__ : Dict = use_labels
lowercase__ : int = gelu_activation
lowercase__ : Dict = sinusoidal_embeddings
lowercase__ : Union[str, Any] = causal
lowercase__ : Optional[int] = asm
lowercase__ : Any = n_langs
lowercase__ : List[str] = vocab_size
lowercase__ : Tuple = n_special
lowercase__ : Union[str, Any] = hidden_size
lowercase__ : List[str] = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Optional[int] = max_position_embeddings
lowercase__ : Optional[int] = type_vocab_size
lowercase__ : Tuple = type_sequence_label_size
lowercase__ : str = initializer_range
lowercase__ : Union[str, Any] = num_labels
lowercase__ : Optional[Any] = num_choices
lowercase__ : Union[str, Any] = summary_type
lowercase__ : List[str] = use_proj
lowercase__ : Union[str, Any] = scope
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Union[str, Any] = None
if self.use_input_lengths:
lowercase__ : Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ : int = None
if self.use_token_type_ids:
lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : Optional[Any] = None
lowercase__ : Any = None
lowercase__ : str = None
if self.use_labels:
lowercase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : Dict , ) -> List[str]:
lowercase__ : str = FlaubertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : int = model(lowercase_ , lengths=lowercase_ , langs=lowercase_ )
lowercase__ : List[Any] = model(lowercase_ , langs=lowercase_ )
lowercase__ : Dict = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : str , lowercase_ : Union[str, Any] , ) -> Tuple:
lowercase__ : Optional[Any] = FlaubertWithLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : Dict = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : Any , lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Any , ) -> Optional[int]:
lowercase__ : Union[str, Any] = FlaubertForQuestionAnsweringSimple(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : Tuple = model(lowercase_ )
lowercase__ : List[str] = model(lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : int , ) -> Any:
lowercase__ : Tuple = FlaubertForQuestionAnswering(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : Dict = model(lowercase_ )
lowercase__ : List[str] = model(
lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , cls_index=lowercase_ , is_impossible=lowercase_ , p_mask=lowercase_ , )
lowercase__ : Optional[int] = model(
lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , cls_index=lowercase_ , is_impossible=lowercase_ , )
((lowercase__) , ) : List[str] = result_with_labels.to_tuple()
lowercase__ : str = model(lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ )
((lowercase__) , ) : Dict = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __UpperCamelCase ( self : int , lowercase_ : str , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : List[str] , ) -> Optional[int]:
lowercase__ : Optional[int] = FlaubertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : List[str] = model(lowercase_ )
lowercase__ : List[Any] = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self : Tuple , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Tuple , ) -> Union[str, Any]:
lowercase__ : List[str] = self.num_labels
lowercase__ : Union[str, Any] = FlaubertForTokenClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : Tuple = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : Any , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : str , ) -> List[Any]:
lowercase__ : Union[str, Any] = self.num_choices
lowercase__ : Any = FlaubertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : List[str] = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : List[str] = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : Tuple = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case_ ( __A ,__A ,unittest.TestCase ):
__A : int = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__A : Optional[int] = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def __UpperCamelCase ( self : List[Any] , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : List[Any] ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=False ) -> List[str]:
lowercase__ : List[Any] = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
lowercase__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowercase__ : int = FlaubertModelTester(self )
lowercase__ : Optional[int] = ConfigTester(self , config_class=lowercase_ , emb_dim=37 )
def __UpperCamelCase ( self : int ) -> List[str]:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : int ) -> Optional[int]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase_ )
def __UpperCamelCase ( self : Dict ) -> Any:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase_ )
def __UpperCamelCase ( self : Tuple ) -> Tuple:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase_ )
def __UpperCamelCase ( self : Optional[Any] ) -> str:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase_ )
def __UpperCamelCase ( self : int ) -> str:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase_ )
def __UpperCamelCase ( self : int ) -> Dict:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase_ )
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = FlaubertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ : Optional[Any] = True
lowercase__ : List[str] = model_class(config=lowercase_ )
lowercase__ : int = self._prepare_for_class(lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = torch.jit.trace(
lowercase_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase_ , os.path.join(lowercase_ , "traced_model.pt" ) )
lowercase__ : str = torch.jit.load(os.path.join(lowercase_ , "traced_model.pt" ) , map_location=lowercase_ )
loaded(inputs_dict["input_ids"].to(lowercase_ ) , inputs_dict["attention_mask"].to(lowercase_ ) )
@require_torch
class snake_case_ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Any ) -> Optional[int]:
lowercase__ : Union[str, Any] = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ : Dict = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
with torch.no_grad():
lowercase__ : Union[str, Any] = model(lowercase_ )[0]
lowercase__ : Any = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , lowercase_ )
lowercase__ : Dict = torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
| 333 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case_ ( __A ):
__A : List[str] = "unispeech"
def __init__( self : List[Any] , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=1E-5 , lowercase_ : int="group" , lowercase_ : Tuple="gelu" , lowercase_ : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : int=False , lowercase_ : List[Any]=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Any=2 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=3_20 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=1_00 , lowercase_ : Dict=2_56 , lowercase_ : Optional[Any]=2_56 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=2_56 , lowercase_ : Union[str, Any]=80 , lowercase_ : int=0 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.5 , **lowercase_ : Union[str, Any] , ) -> Any:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : Any = feat_extract_norm
lowercase__ : Optional[Any] = feat_extract_activation
lowercase__ : Dict = list(lowercase_ )
lowercase__ : Union[str, Any] = list(lowercase_ )
lowercase__ : List[str] = list(lowercase_ )
lowercase__ : List[str] = conv_bias
lowercase__ : Any = num_conv_pos_embeddings
lowercase__ : Dict = num_conv_pos_embedding_groups
lowercase__ : int = len(self.conv_dim )
lowercase__ : str = num_hidden_layers
lowercase__ : Any = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : int = num_attention_heads
lowercase__ : Union[str, Any] = hidden_dropout
lowercase__ : Any = attention_dropout
lowercase__ : Union[str, Any] = activation_dropout
lowercase__ : Any = feat_proj_dropout
lowercase__ : str = final_dropout
lowercase__ : int = layerdrop
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : Any = num_ctc_classes
lowercase__ : int = vocab_size
lowercase__ : str = do_stable_layer_norm
lowercase__ : Any = use_weighted_layer_sum
lowercase__ : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : List[Any] = apply_spec_augment
lowercase__ : Dict = mask_time_prob
lowercase__ : Tuple = mask_time_length
lowercase__ : str = mask_time_min_masks
lowercase__ : List[Any] = mask_feature_prob
lowercase__ : int = mask_feature_length
lowercase__ : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ : Optional[int] = num_codevectors_per_group
lowercase__ : List[str] = num_codevector_groups
lowercase__ : Dict = contrastive_logits_temperature
lowercase__ : Tuple = feat_quantizer_dropout
lowercase__ : Any = num_negatives
lowercase__ : Dict = codevector_dim
lowercase__ : Tuple = proj_codevector_dim
lowercase__ : List[str] = diversity_loss_weight
# ctc loss
lowercase__ : Tuple = ctc_loss_reduction
lowercase__ : Dict = ctc_zero_infinity
# pretraining loss
lowercase__ : Optional[Any] = replace_prob
@property
def __UpperCamelCase ( self : Dict ) -> Tuple:
return functools.reduce(operator.mul , self.conv_stride , 1 )
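        # A quick sanity check of the property above: with the default conv_stride of
        # (5, 2, 2, 2, 2, 2, 2), the product is 5 * 2**6 = 320, i.e. the feature
        # encoder emits one frame per 320 input samples (20 ms at 16 kHz audio).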
| 333 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any]):
lowercase__ : Any = UniSpeechSatForSequenceClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase)
lowercase__ : Union[str, Any] = downstream_dict["projector.weight"]
lowercase__ : int = downstream_dict["projector.bias"]
lowercase__ : int = downstream_dict["model.post_net.linear.weight"]
lowercase__ : int = downstream_dict["model.post_net.linear.bias"]
return model
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : Dict):
lowercase__ : Any = UniSpeechSatForAudioFrameClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase)
lowercase__ : int = downstream_dict["model.linear.weight"]
lowercase__ : str = downstream_dict["model.linear.bias"]
return model
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : str , _lowerCamelCase : List[str]):
lowercase__ : Tuple = UniSpeechSatForXVector.from_pretrained(_lowerCamelCase , config=_lowerCamelCase)
lowercase__ : Union[str, Any] = downstream_dict["connector.weight"]
lowercase__ : str = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel):
lowercase__ : Tuple = downstream_dict[
f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
lowercase__ : Union[str, Any] = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
lowercase__ : Tuple = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
lowercase__ : Tuple = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
lowercase__ : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
lowercase__ : Tuple = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
lowercase__ : Optional[Any] = downstream_dict["objective.W"]
return model
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict):
lowercase__ : str = torch.load(_lowerCamelCase , map_location="cpu")
lowercase__ : Optional[Any] = checkpoint["Downstream"]
lowercase__ : Dict = UniSpeechSatConfig.from_pretrained(_lowerCamelCase)
lowercase__ : str = WavaVecaFeatureExtractor.from_pretrained(
_lowerCamelCase , return_attention_mask=_lowerCamelCase , do_normalize=_lowerCamelCase)
lowercase__ : Union[str, Any] = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification"):
lowercase__ : int = convert_classification(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
elif arch.endswith("ForAudioFrameClassification"):
lowercase__ : List[str] = convert_diarization(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
elif arch.endswith("ForXVector"):
lowercase__ : Any = convert_xvector(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''')
if hf_config.use_weighted_layer_sum:
lowercase__ : Tuple = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowerCamelCase)
hf_model.save_pretrained(_lowerCamelCase)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
UpperCamelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
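# Hypothetical invocation (every value below is a placeholder, not something shipped
# with this script):
#   python convert_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base-plus \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model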
| 333 | def lowercase_ ( _lowerCamelCase : list):
for i in range(len(_lowerCamelCase) - 1 , 0 , -1):
lowercase__ : int = False
for j in range(_lowerCamelCase , 0 , -1):
if unsorted[j] < unsorted[j - 1]:
lowercase__ , lowercase__ : int = unsorted[j - 1], unsorted[j]
lowercase__ : List[str] = True
for j in range(_lowerCamelCase):
if unsorted[j] > unsorted[j + 1]:
lowercase__ , lowercase__ : Optional[int] = unsorted[j + 1], unsorted[j]
lowercase__ : Dict = True
if not swapped:
break
return unsorted
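# Hedged sanity check (assuming the function is exposed as cocktail_shaker_sort, as
# in the __main__ block below):
#   cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5]
# Each outer pass first bubbles the smallest remaining value leftward, then the
# largest rightward; the `swapped` flag lets already-sorted input exit early.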
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | 1 |
from __future__ import annotations
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : str):
lowercase__ : str = get_failure_array(_lowerCamelCase)
# 2) Step through text searching for pattern
lowercase__ , lowercase__ : int = 0, 0 # index into text, pattern
while i < len(_lowerCamelCase):
if pattern[j] == text[i]:
if j == (len(_lowerCamelCase) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
lowercase__ : List[str] = failure[j - 1]
continue
i += 1
return False
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Dict = [0]
lowercase__ : Dict = 0
lowercase__ : List[str] = 1
while j < len(_lowerCamelCase):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
lowercase__ : Optional[int] = failure[i - 1]
continue
j += 1
failure.append(_lowerCamelCase)
return failure
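# Hedged trace of the failure function above: for pattern "ABABX" it returns
# [0, 0, 1, 2, 0]; failure[j] is the length of the longest proper prefix of
# pattern[: j + 1] that is also a suffix of it, which is exactly how far the
# matcher (exposed as kmp in the tests below) can safely fall back on a mismatch.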
if __name__ == "__main__":
# Test 1)
UpperCamelCase = '''abc1abc12'''
UpperCamelCase = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
UpperCamelCase = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCamelCase = '''ABABX'''
UpperCamelCase = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
UpperCamelCase = '''AAAB'''
UpperCamelCase = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
UpperCamelCase = '''abcdabcy'''
UpperCamelCase = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
UpperCamelCase = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 333 | import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class snake_case_ ( __A ):
__A : int = "token-classification"
def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]:
if type(lowercase_ ) == dict:
lowercase__ : Dict = Namespace(**lowercase_ )
lowercase__ : str = import_module("tasks" )
try:
lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
lowercase__ : int = CrossEntropyLoss().ignore_index
super().__init__(lowercase_ , len(self.labels ) , self.mode )
def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any:
return self.model(**lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : Tuple = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don't use token_type_ids
lowercase__ : Optional[int] = self(**lowercase_ )
lowercase__ : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
lowercase__ : Tuple = self.hparams
for mode in ["train", "dev", "test"]:
lowercase__ : Any = self._feature_file(lowercase_ )
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ )
lowercase__ : Dict = self.token_classification_task.convert_examples_to_features(
lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , lowercase_ )
torch.save(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader:
lowercase__ : str = self._feature_file(lowercase_ )
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
lowercase__ : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK (we will not need this for much longer)
lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str:
"""Compute validation""" ""
lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : int = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don't use token_type_ids
lowercase__ : List[Any] = self(**lowercase_ )
lowercase__ , lowercase__ : Any = outputs[:2]
lowercase__ : Optional[Any] = logits.detach().cpu().numpy()
lowercase__ : int = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]:
lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean()
lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 )
lowercase__ : Dict = np.argmax(lowercase_ , axis=2 )
lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowercase__ : Any = dict(enumerate(self.labels ) )
lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase__ : Any = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
"precision": precision_score(lowercase_ , lowercase_ ),
"recall": recall_score(lowercase_ , lowercase_ ),
"f1": fa_score(lowercase_ , lowercase_ ),
}
lowercase__ : List[Any] = dict(results.items() )
lowercase__ : List[str] = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict:
# when stable
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
lowercase__ : Any = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int:
# updating to test_epoch_end instead of deprecated test_end
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase__ : Optional[int] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple:
# Add NER specific options
BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
parser.add_argument(
"--task_type" , default="NER" , type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=lowercase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase = parser.parse_args()
UpperCamelCase = NERTransformer(args)
UpperCamelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCamelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
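# --- Illustration (not part of the original script): a minimal, self-contained
# sketch of the label realignment performed in `_eval_end` above. Positions whose
# gold id equals `pad_token_label_id` are padding/subword positions and must be
# skipped so predictions and targets stay aligned for seqeval-style metrics.
# The helper name `realign` is hypothetical.
import numpy as np

def realign(preds, out_label_ids, label_map, pad_token_label_id):
    preds_list = [[] for _ in range(out_label_ids.shape[0])]
    target_list = [[] for _ in range(out_label_ids.shape[0])]
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != pad_token_label_id:
                target_list[i].append(label_map[out_label_ids[i, j]])
                preds_list[i].append(label_map[preds[i, j]])
    return preds_list, target_list

# Tiny check: one sentence of length 2, padded to length 4 with pad id -100.
_preds = np.array([[0, 1, 0, 0]])
_gold = np.array([[0, 1, -100, -100]])
assert realign(_preds, _gold, {0: "O", 1: "B-PER"}, -100) == ([["O", "B-PER"]], [["O", "B-PER"]])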
| 333 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''PoolFormerFeatureExtractor''']
UpperCamelCase = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
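# --- Illustration (not part of the library code): a minimal sketch of the lazy
# import pattern set up above. Attribute access on the module proxy triggers the
# real import; until then only a cheap name table exists. `TinyLazyModule` is a
# hypothetical stand-in, not the actual `_LazyModule` implementation.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        submodule = self._symbol_to_module[symbol]  # KeyError for unknown names
        return getattr(importlib.import_module(submodule), symbol)

# First attribute access imports `json`; before that, nothing is loaded.
assert TinyLazyModule("m", {"json": ["dumps"]}).dumps({"a": 1}) == '{"a": 1}'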
| 333 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 333 | 1 |
UpperCamelCase = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(10_0000)]
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Any = 0
while number:
        # Increases speed slightly by checking five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Every chain ends in one of two cycles.
# One cycle contains 89; declaring its member 58 first minimizes the number of
# iterations needed to resolve the remaining members.
# The other cycle is the fixed point 1, whose only element is 1.
# So 58 and 1 are seeded at the start.
# A dictionary was replaced with an array to speed up lookups.
UpperCamelCase = [None] * 1000_0000
UpperCamelCase = True
UpperCamelCase = False
def lowercase_ ( _lowerCamelCase : int):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowercase__ : List[str] = chain(next_number(_lowerCamelCase))
lowercase__ : Optional[int] = number_chain
while number < 1000_0000:
lowercase__ : Union[str, Any] = number_chain
number *= 10
return number_chain
def lowercase_ ( _lowerCamelCase : int = 1000_0000):
for i in range(1 , _lowerCamelCase):
if CHAINS[i] is None:
chain(i + 1)
return CHAINS[:number].count(_lowerCamelCase)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
| 333 | # Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowercase_ ( _lowerCamelCase : List[str]):
return 1 / (1 + np.exp(-z))
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
return (-y * np.log(_lowerCamelCase) - (1 - y) * np.log(1 - h)).mean()
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple):
lowercase__ : Union[str, Any] = np.dot(_lowerCamelCase , _lowerCamelCase)
return np.sum(y * scores - np.log(1 + np.exp(_lowerCamelCase)))
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str=7_0000):
lowercase__ : Optional[int] = np.zeros(x.shape[1])
for iterations in range(_lowerCamelCase):
lowercase__ : Union[str, Any] = np.dot(_lowerCamelCase , _lowerCamelCase)
lowercase__ : Tuple = sigmoid_function(_lowerCamelCase)
lowercase__ : Dict = np.dot(x.T , h - y) / y.size
lowercase__ : int = theta - alpha * gradient # updating the weights
lowercase__ : List[str] = np.dot(_lowerCamelCase , _lowerCamelCase)
lowercase__ : Union[str, Any] = sigmoid_function(_lowerCamelCase)
lowercase__ : Optional[Any] = cost_function(_lowerCamelCase , _lowerCamelCase)
if iterations % 100 == 0:
print(f'''loss: {j} \t''') # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
UpperCamelCase = datasets.load_iris()
UpperCamelCase = iris.data[:, :2]
UpperCamelCase = (iris.target != 0) * 1
UpperCamelCase = 0.1
UpperCamelCase = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def lowercase_ ( _lowerCamelCase : List[Any]):
return sigmoid_function(
np.dot(_lowerCamelCase , _lowerCamelCase)) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((UpperCamelCase) , (UpperCamelCase)) = (x[:, 0].min(), x[:, 0].max())
((UpperCamelCase) , (UpperCamelCase)) = (x[:, 1].min(), x[:, 1].max())
((UpperCamelCase) , (UpperCamelCase)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
UpperCamelCase = np.c_[xxa.ravel(), xxa.ravel()]
UpperCamelCase = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
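# --- Illustration (not part of the script above): the update direction used in
# the training loop, x.T @ (h - y) / m, is the exact gradient of the mean
# cross-entropy cost. Self-contained finite-difference check (helper names
# prefixed with "_" are hypothetical):
def _sigmoid(z):
    return 1 / (1 + np.exp(-z))

def _mean_cost(X, y, t):
    h = _sigmoid(X @ t)
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()

_rng = np.random.default_rng(0)
_X = _rng.normal(size=(20, 2))
_y = (_X[:, 0] + _X[:, 1] > 0).astype(float)
_t = _rng.normal(size=2)
_analytic = _X.T @ (_sigmoid(_X @ _t) - _y) / _y.size
_eps = 1e-6
_numeric = np.array(
    [(_mean_cost(_X, _y, _t + _eps * e) - _mean_cost(_X, _y, _t - _eps * e)) / (2 * _eps) for e in np.eye(2)]
)
assert np.allclose(_analytic, _numeric, atol=1e-5)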
| 333 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowercase_ ( _lowerCamelCase : Dict):
for param in module.parameters():
lowercase__ : Optional[int] = False
def lowercase_ ( ):
lowercase__ : int = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations.")
return device
def lowercase_ ( _lowerCamelCase : Optional[int]):
lowercase__ : Union[str, Any] = plt.imshow(_lowerCamelCase)
fig.axes.get_xaxis().set_visible(_lowerCamelCase)
fig.axes.get_yaxis().set_visible(_lowerCamelCase)
plt.show()
def lowercase_ ( ):
lowercase__ : Union[str, Any] = datetime.now()
lowercase__ : List[str] = current_time.strftime("%H:%M:%S")
return timestamp
| 333 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__A )
class snake_case_ ( __A ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__A : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__A : ClassVar[Features] = Features({"text": Value("string" )} )
__A : ClassVar[Features] = Features({"labels": ClassLabel} )
__A : str = "text"
__A : str = "labels"
def __UpperCamelCase ( self : Dict , lowercase_ : Optional[Any] ) -> int:
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
lowercase__ : Optional[int] = copy.deepcopy(self )
lowercase__ : Tuple = self.label_schema.copy()
lowercase__ : Union[str, Any] = features[self.label_column]
lowercase__ : int = label_schema
return task_template
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| 333 | 1 |
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
while a != 0:
lowercase__ , lowercase__ : Dict = b % a, a
return b
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
if gcd(_lowerCamelCase , _lowerCamelCase) != 1:
lowercase__ : Tuple = f'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(_lowerCamelCase)
lowercase__ , lowercase__ , lowercase__ : Optional[int] = 1, 0, a
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = 0, 1, m
while va != 0:
lowercase__ : Tuple = ua // va
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
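# --- Illustration (not part of the functions above): for coprime a and m the
# modular inverse x satisfies (a * x) % m == 1, e.g. the inverse of 3 mod 11 is
# 4 because 3 * 4 = 12 = 1 (mod 11). Cross-check against Python's built-in
# modular inverse (available since 3.8):
assert (3 * 4) % 11 == 1
assert pow(3, -1, 11) == 4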
| 333 | def lowercase_ ( _lowerCamelCase : int = 10 , _lowerCamelCase : int = 1000 , _lowerCamelCase : bool = True):
assert (
isinstance(_lowerCamelCase , _lowerCamelCase)
and isinstance(_lowerCamelCase , _lowerCamelCase)
and isinstance(_lowerCamelCase , _lowerCamelCase)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
return min_val if option else max_val
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
return int((number_a + number_a) / 2)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int):
assert (
isinstance(_lowerCamelCase , _lowerCamelCase) and isinstance(_lowerCamelCase , _lowerCamelCase) and isinstance(_lowerCamelCase , _lowerCamelCase)
), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument values must satisfy lower <= higher")
    if not lower < to_guess < higher:
        raise ValueError(
            "to_guess value must lie strictly between the lower and higher values")
def answer(_lowerCamelCase : int) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started...")
lowercase__ : Optional[int] = lower
lowercase__ : List[Any] = higher
lowercase__ : Dict = []
while True:
lowercase__ : Any = get_avg(_lowerCamelCase , _lowerCamelCase)
last_numbers.append(_lowerCamelCase)
if answer(_lowerCamelCase) == "low":
lowercase__ : List[str] = number
elif answer(_lowerCamelCase) == "high":
lowercase__ : Optional[int] = number
else:
break
print(f'''guess the number : {last_numbers[-1]}''')
print(f'''details : {last_numbers!s}''')
def lowercase_ ( ):
lowercase__ : Tuple = int(input("Enter lower value : ").strip())
lowercase__ : Optional[int] = int(input("Enter high value : ").strip())
lowercase__ : Optional[Any] = int(input("Enter value to guess : ").strip())
guess_the_number(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
if __name__ == "__main__":
main()
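# --- Illustration (not part of the game above): the guessing strategy is plain
# bisection, so it needs at most ceil(log2(higher - lower)) midpoint probes.
# Self-contained probe counter (names are hypothetical):
import math

def probes(lower, higher, to_guess):
    count = 0
    while True:
        count += 1
        mid = (lower + higher) // 2
        if mid == to_guess:
            return count
        if mid > to_guess:
            higher = mid
        else:
            lower = mid

assert max(probes(0, 1000, t) for t in range(1, 1000)) <= math.ceil(math.log2(1000))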
| 333 | 1 |
from math import sqrt
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : List[Any] = 0
for i in range(1 , int(sqrt(_lowerCamelCase) + 1)):
if n % i == 0 and i != sqrt(_lowerCamelCase):
total += i + n // i
elif i == sqrt(_lowerCamelCase):
total += i
return total - n
def lowercase_ ( _lowerCamelCase : int = 1_0000):
lowercase__ : str = sum(
i
for i in range(1 , _lowerCamelCase)
if sum_of_divisors(sum_of_divisors(_lowerCamelCase)) == i and sum_of_divisors(_lowerCamelCase) != i)
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
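# --- Illustration (not part of the solution above): 220 and 284 form the
# classic amicable pair: the proper divisors of 220 sum to 284 and vice versa.
# Self-contained check (`_proper_divisor_sum` is a hypothetical helper):
def _proper_divisor_sum(n):
    return sum(i for i in range(1, n) if n % i == 0)

assert _proper_divisor_sum(220) == 284
assert _proper_divisor_sum(284) == 220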
| 333 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCamelCase = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ) -> List[str]:
lowercase__ : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowercase__ : List[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowercase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ )
lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowercase_ , "w" , newline="\n" ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , "r" ) as f:
self.assertTrue(f.read() , lowercase_ )
def __UpperCamelCase ( self : str ) -> Optional[int]:
lowercase__ : Optional[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> str:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , )
# Copy consistency with a really long name
lowercase__ : Optional[int] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
| 333 | 1 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class snake_case_ ( __A ):
__A : Dict = (KDPMaDiscreteScheduler,)
__A : Optional[int] = 10
def __UpperCamelCase ( self : Tuple , **lowercase_ : Tuple ) -> Union[str, Any]:
lowercase__ : str = {
"num_train_timesteps": 11_00,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowercase_ )
return config
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> int:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase_ )
def __UpperCamelCase ( self : Any ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : Union[str, Any] = self.get_scheduler_config(prediction_type="v_prediction" )
lowercase__ : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps )
lowercase__ : Dict = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase__ : int = sample.to(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ : str = scheduler.scale_model_input(lowercase_ , lowercase_ )
lowercase__ : str = model(lowercase_ , lowercase_ )
lowercase__ : List[str] = scheduler.step(lowercase_ , lowercase_ , lowercase_ )
lowercase__ : Optional[Any] = output.prev_sample
lowercase__ : List[str] = torch.sum(torch.abs(lowercase_ ) )
lowercase__ : int = torch.mean(torch.abs(lowercase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34E-07 ) < 1E-2
assert abs(result_mean.item() - 6.11_12E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07 ) < 1E-2
assert abs(result_mean.item() - 0.00_02 ) < 1E-3
def __UpperCamelCase ( self : Dict ) -> Dict:
if torch_device == "mps":
return
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config()
lowercase__ : Any = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps )
lowercase__ : str = self.dummy_model()
lowercase__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase__ : Optional[int] = sample.to(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ : List[str] = scheduler.scale_model_input(lowercase_ , lowercase_ )
lowercase__ : Any = model(lowercase_ , lowercase_ )
lowercase__ : Tuple = scheduler.step(lowercase_ , lowercase_ , lowercase_ )
lowercase__ : Dict = output.prev_sample
lowercase__ : Dict = torch.sum(torch.abs(lowercase_ ) )
lowercase__ : List[Any] = torch.mean(torch.abs(lowercase_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
if torch_device == "mps":
return
lowercase__ : List[Any] = self.scheduler_classes[0]
lowercase__ : Union[str, Any] = self.get_scheduler_config()
lowercase__ : Any = scheduler_class(**lowercase_ )
scheduler.set_timesteps(self.num_inference_steps , device=lowercase_ )
lowercase__ : List[Any] = self.dummy_model()
lowercase__ : Tuple = self.dummy_sample_deter.to(lowercase_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowercase__ : Any = scheduler.scale_model_input(lowercase_ , lowercase_ )
lowercase__ : Any = model(lowercase_ , lowercase_ )
lowercase__ : List[str] = scheduler.step(lowercase_ , lowercase_ , lowercase_ )
lowercase__ : Dict = output.prev_sample
lowercase__ : Tuple = torch.sum(torch.abs(lowercase_ ) )
lowercase__ : Any = torch.mean(torch.abs(lowercase_ ) )
if str(lowercase_ ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25 ) < 1E-2
assert abs(result_mean.item() - 0.02_66 ) < 1E-3
| 333 | from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True):
model.train()
lowercase__ : Tuple = model(_lowerCamelCase)
lowercase__ : Union[str, Any] = F.mse_loss(_lowerCamelCase , target.to(output.device))
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=False):
set_seed(42)
lowercase__ : Dict = RegressionModel()
lowercase__ : int = deepcopy(_lowerCamelCase)
lowercase__ : str = RegressionDataset(length=80)
lowercase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=16)
model.to(accelerator.device)
if sched:
lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=1E-3)
lowercase__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3)
lowercase__ : Optional[int] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
lowercase__ : Union[str, Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
# Make a copy of `model`
if sched:
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
lowercase__ , lowercase__ : int = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowercase_ ( _lowerCamelCase : Tuple):
    # Test that, on a single CPU or GPU, the context manager does nothing
lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Any):
    # Test that the context manager behaves properly in a distributed setup
lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Union[str, Any]=False):
lowercase__ : int = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : str = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))]
GradientState._reset_state()
def lowercase_ ( _lowerCamelCase : List[str]=False , _lowerCamelCase : int=False):
lowercase__ : Dict = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : Any = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
GradientState._reset_state()
def lowercase_ ( ):
lowercase__ : List[str] = Accelerator()
lowercase__ : List[Any] = RegressionDataset(length=80)
lowercase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ : int = RegressionDataset(length=96)
lowercase__ : List[str] = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ , lowercase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if iteration < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if batch_num < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowercase_ ( ):
lowercase__ : str = Accelerator()
lowercase__ : Dict = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**")
test_noop_sync(_lowerCamelCase)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**")
test_distributed_sync(_lowerCamelCase)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
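# --- Illustration (not part of the test script above): the invariant these
# tests rely on. Scaling each micro-batch loss by 1/steps makes accumulated
# gradients match one full-batch backward pass exactly; under DDP, `no_sync()`
# (exercised above) just defers the gradient all-reduce while this local
# accumulation happens. Self-contained single-process check (helper name is
# hypothetical):
def _accumulation_matches_full_batch():
    torch.manual_seed(0)
    data = torch.randn(8, 3)
    target = torch.randn(8, 1)
    layer_full = torch.nn.Linear(3, 1)
    layer_accum = deepcopy(layer_full)
    # one full-batch backward
    F.mse_loss(layer_full(data), target).backward()
    # two half-batch backwards, each loss scaled by 1/2
    for chunk in range(2):
        sl = slice(chunk * 4, (chunk + 1) * 4)
        (F.mse_loss(layer_accum(data[sl]), target[sl]) / 2).backward()
    return torch.allclose(layer_full.weight.grad, layer_accum.weight.grad, atol=1e-5)

assert _accumulation_matches_full_batch()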
| 333 | 1 |
def lowercase_ ( _lowerCamelCase : int = 100):
lowercase__ : List[str] = n * (n + 1) * (2 * n + 1) / 6
lowercase__ : Optional[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f"{solution() = }")
| 333 | import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : str):
lowercase__ : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase)
lowercase__ : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase)
lowercase__ : List[str] = checkpoints.load_tax_checkpoint(_lowerCamelCase)
lowercase__ : Dict = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global'].")
# Encoder
for layer_index in range(config.num_layers):
lowercase__ : str = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
lowercase__ : Dict = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ : List[str] = tax_mlp_wi_a
lowercase__ : List[Any] = tax_mlp_wi_a
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
        lowercase__ : Tuple = tax_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    lowercase__ : List[Any] = tax_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
    lowercase__ : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase)
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
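# --- Illustration (not part of the conversion script): T5 v1.1 and LongT5 use a
# gated activation, so their MLP stores two input kernels ("wi_0", "wi_1")
# instead of one ("wi"). The script detects this by probing for "wi_0", as in
# this self-contained sketch:
_legacy_mlp = {"wi": "kernel", "wo": "kernel"}
_gated_mlp = {"wi_0": "kernel", "wi_1": "kernel", "wo": "kernel"}
assert "wi_0" not in _legacy_mlp
assert "wi_0" in _gated_mlp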
| 333 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class snake_case_ ( __A ,__A ,unittest.TestCase ):
__A : List[Any] = IFPipeline
__A : Dict = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
__A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = PipelineTesterMixin.required_optional_params - {"latents"}
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
return self._get_dummy_components()
def __UpperCamelCase ( self : List[Any] , lowercase_ : Dict , lowercase_ : int=0 ) -> Union[str, Any]:
if str(lowercase_ ).startswith("mps" ):
lowercase__ : int = torch.manual_seed(lowercase_ )
else:
lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase__ : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Dict ) -> Tuple:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __UpperCamelCase ( self : int ) -> Optional[int]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCamelCase ( self : str ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCamelCase ( self : Any ) -> List[str]:
self._test_save_load_local()
def __UpperCamelCase ( self : List[Any] ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
        # text-to-image (base IF pipeline)
lowercase__ : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
lowercase__ : Dict = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=lowercase_ , tokenizer=lowercase_ )
        # pre-compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
lowercase__ , lowercase__ : Optional[int] = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowercase__ : Tuple = None
lowercase__ : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowercase__ : Optional[Any] = IFImgaImgPipeline(**pipe_a.components )
lowercase__ : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowercase__ : Tuple = IFInpaintingPipeline(**pipe_a.components )
lowercase__ : List[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Any ) -> List[str]:
# pipeline 1
_start_torch_memory_measurement()
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : List[Any] = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , num_inference_steps=2 , generator=lowercase_ , output_type="np" , )
lowercase__ : Any = output.images[0]
assert image.shape == (64, 64, 3)
lowercase__ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowercase__ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(lowercase_ , lowercase_ )
# pipeline 2
_start_torch_memory_measurement()
lowercase__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : Union[str, Any] = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type="np" , )
lowercase__ : Union[str, Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Any , lowercase_ : Dict ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
lowercase__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : str = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : str = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , num_inference_steps=2 , generator=lowercase_ , output_type="np" , )
lowercase__ : Tuple = output.images[0]
assert image.shape == (64, 64, 3)
lowercase__ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(lowercase_ , lowercase_ )
# pipeline 2
_start_torch_memory_measurement()
lowercase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : List[str] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : Tuple = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , original_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type="np" , )
lowercase__ : Dict = output.images[0]
assert image.shape == (2_56, 2_56, 3)
lowercase__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Any , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Any ) -> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
lowercase__ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowercase_ )
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : List[Any] = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , mask_image=lowercase_ , num_inference_steps=2 , generator=lowercase_ , output_type="np" , )
lowercase__ : str = output.images[0]
assert image.shape == (64, 64, 3)
lowercase__ : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(lowercase_ , lowercase_ )
# pipeline 2
_start_torch_memory_measurement()
lowercase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : Optional[int] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(lowercase_ )
lowercase__ : Optional[Any] = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , mask_image=lowercase_ , original_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type="np" , )
lowercase__ : Any = output.images[0]
assert image.shape == (2_56, 2_56, 3)
lowercase__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def lowercase_ ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 333 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
__A : Optional[int] = "rwkv"
__A : List[str] = {"max_position_embeddings": "context_length"}
def __init__( self : Dict , lowercase_ : List[Any]=5_02_77 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=40_96 , lowercase_ : int=32 , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Any=1E-5 , lowercase_ : Optional[Any]=0 , lowercase_ : Any=0 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=False , lowercase_ : int=True , **lowercase_ : List[str] , ) -> int:
lowercase__ : List[str] = vocab_size
lowercase__ : str = context_length
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase__ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowercase__ : List[Any] = layer_norm_epsilon
lowercase__ : str = rescale_every
lowercase__ : Optional[int] = use_cache
lowercase__ : int = bos_token_id
lowercase__ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
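# Hedged usage sketch (added for illustration; not part of the original dump).
# Upstream transformers ships the class above as `RwkvConfig`; the assertion
# checks the derived default computed in __init__ (intermediate_size = 4 * hidden_size).
if __name__ == "__main__":
    from transformers import RwkvConfig  # assumed upstream equivalent of the class above

    _cfg = RwkvConfig(context_length=2_048)
    assert _cfg.context_length == 2_048
    assert _cfg.intermediate_size == 4 * _cfg.hidden_size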
| 333 | 1 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCamelCase = sys.version_info >= (3, 10)
def lowercase_ ( _lowerCamelCase : Tuple=None , _lowerCamelCase : int=None):
return field(default_factory=lambda: default , metadata=_lowerCamelCase)
@dataclass
class snake_case_ :
__A : int
__A : float
__A : str
__A : bool
@dataclass
class snake_case_ :
__A : int = 42
__A : str = field(default="toto" ,metadata={"help": "help message"} )
@dataclass
class snake_case_ :
__A : bool = False
__A : bool = True
__A : Optional[bool] = None
class snake_case_ ( __A ):
__A : str = "titi"
__A : List[str] = "toto"
class snake_case_ ( __A ):
__A : Optional[int] = "titi"
__A : Union[str, Any] = "toto"
__A : str = 42
@dataclass
class snake_case_ :
__A : BasicEnum = "toto"
def __UpperCamelCase ( self : int ) -> List[Any]:
lowercase__ : str = BasicEnum(self.foo )
@dataclass
class snake_case_ :
__A : MixedTypeEnum = "toto"
def __UpperCamelCase ( self : str ) -> str:
lowercase__ : int = MixedTypeEnum(self.foo )
@dataclass
class snake_case_ :
__A : Optional[int] = None
__A : Optional[float] = field(default=__A ,metadata={"help": "help message"} )
__A : Optional[str] = None
__A : Optional[List[str]] = list_field(default=[] )
__A : Optional[List[int]] = list_field(default=[] )
@dataclass
class snake_case_ :
__A : List[int] = list_field(default=[] )
__A : List[int] = list_field(default=[1, 2, 3] )
__A : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
__A : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case_ :
__A : List[int] = field()
__A : str = field()
__A : BasicEnum = field()
def __UpperCamelCase ( self : Any ) -> int:
lowercase__ : int = BasicEnum(self.required_enum )
@dataclass
class snake_case_ :
__A : int
__A : "BasicEnum" = field()
__A : "Optional[bool]" = None
__A : "str" = field(default="toto" ,metadata={"help": "help message"} )
__A : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case_ :
__A : bool = False
__A : bool = True
__A : bool | None = None
@dataclass
class snake_case_ :
__A : int | None = None
__A : float | None = field(default=__A ,metadata={"help": "help message"} )
__A : str | None = None
__A : list[str] | None = list_field(default=[] )
__A : list[int] | None = list_field(default=[] )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : int , lowercase_ : argparse.ArgumentParser , lowercase_ : argparse.ArgumentParser ) -> Tuple:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowercase__ : Dict = {k: v for k, v in vars(lowercase_ ).items() if k != "container"}
lowercase__ : List[str] = {k: v for k, v in vars(lowercase_ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , lowercase_ ) and yy.get("choices" , lowercase_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](lowercase_ ) , yy["type"](lowercase_ ) )
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
lowercase__ : Any = HfArgumentParser(lowercase_ )
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("--foo" , type=lowercase_ , required=lowercase_ )
expected.add_argument("--bar" , type=lowercase_ , required=lowercase_ )
expected.add_argument("--baz" , type=lowercase_ , required=lowercase_ )
expected.add_argument("--flag" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="?" )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((lowercase__) , ) : str = parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_ )
self.assertFalse(example.flag )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Optional[Any] = HfArgumentParser(lowercase_ )
lowercase__ : List[str] = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=lowercase_ )
expected.add_argument("--baz" , default="toto" , type=lowercase_ , help="help message" )
self.argparsersEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : List[str] ) -> Dict:
lowercase__ : Tuple = argparse.ArgumentParser()
expected.add_argument("--foo" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="?" )
expected.add_argument("--baz" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counterpart
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=lowercase_ , dest="baz" )
expected.add_argument("--opt" , type=lowercase_ , default=lowercase_ )
lowercase__ : Union[str, Any] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_ )
for dataclass_type in dataclass_types:
lowercase__ : str = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase__ : Tuple = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase__ : Union[str, Any] = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase__ : Dict = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase__ : Optional[int] = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
lowercase__ : List[Any] = HfArgumentParser(lowercase_ )
lowercase__ : Dict = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : List[Any] = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowercase__ : List[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowercase__ : Any = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowercase__ : int = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowercase__ : Dict = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowercase__ : Dict = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __UpperCamelCase ( self : Tuple ) -> List[str]:
@dataclass
class snake_case_ :
__A : Literal["titi", "toto", 42] = "toto"
lowercase__ : Union[str, Any] = HfArgumentParser(lowercase_ )
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : List[Any] = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowercase__ : int = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowercase__ : Tuple = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
lowercase__ : int = HfArgumentParser(lowercase_ )
lowercase__ : Dict = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=lowercase_ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=lowercase_ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=lowercase_ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : Any = parser.parse_args([] )
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowercase__ : List[Any] = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __UpperCamelCase ( self : List[Any] ) -> int:
lowercase__ : Any = argparse.ArgumentParser()
expected.add_argument("--foo" , default=lowercase_ , type=lowercase_ )
expected.add_argument("--bar" , default=lowercase_ , type=lowercase_ , help="help message" )
expected.add_argument("--baz" , default=lowercase_ , type=lowercase_ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=lowercase_ )
expected.add_argument("--des" , nargs="+" , default=[] , type=lowercase_ )
lowercase__ : str = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_ )
for dataclass_type in dataclass_types:
lowercase__ : Any = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : List[Any] = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[] ) )
lowercase__ : Dict = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
lowercase__ : List[str] = HfArgumentParser(lowercase_ )
lowercase__ : Any = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=lowercase_ , required=lowercase_ )
expected.add_argument("--required_str" , type=lowercase_ , required=lowercase_ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowercase__ : Optional[int] = HfArgumentParser(lowercase_ )
lowercase__ : Dict = argparse.ArgumentParser()
expected.add_argument("--foo" , type=lowercase_ , required=lowercase_ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=lowercase_ , )
expected.add_argument("--opt" , type=lowercase_ , default=lowercase_ )
expected.add_argument("--baz" , default="toto" , type=lowercase_ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Dict ) -> str:
lowercase__ : str = HfArgumentParser(lowercase_ )
lowercase__ : List[Any] = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowercase__ : Optional[Any] = parser.parse_dict(lowercase_ )[0]
lowercase__ : Dict = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowercase__ : List[Any] = HfArgumentParser(lowercase_ )
lowercase__ : Any = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
lowercase__ : Tuple = HfArgumentParser(lowercase_ )
lowercase__ : int = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Any = os.path.join(lowercase_ , "temp_json" )
os.mkdir(lowercase_ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(lowercase_ , lowercase_ )
lowercase__ : Any = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
lowercase__ : Optional[Any] = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : str ) -> int:
lowercase__ : Tuple = HfArgumentParser(lowercase_ )
lowercase__ : Dict = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[int] = os.path.join(lowercase_ , "temp_yaml" )
os.mkdir(lowercase_ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(lowercase_ , lowercase_ )
lowercase__ : Any = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowercase__ : int = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : List[str] = HfArgumentParser(lowercase_ )
self.assertIsNotNone(lowercase_ )
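# Hedged usage sketch (added for illustration; it drives the same public API
# the tests above exercise: HfArgumentParser maps a dataclass onto argparse):
if __name__ == "__main__":
    from dataclasses import dataclass as _dataclass

    @_dataclass
    class _Demo:
        foo: int = 1
        baz: str = "toto"

    (_demo,) = HfArgumentParser(_Demo).parse_args_into_dataclasses(["--foo", "2"])
    assert _demo.foo == 2 and _demo.baz == "toto"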
| 333 | class snake_case_ :
def __init__( self : int ) -> Optional[int]:
lowercase__ : Optional[int] = 0
lowercase__ : List[str] = 0
lowercase__ : Any = {}
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]:
if vertex not in self.adjacency:
lowercase__ : List[Any] = {}
self.num_vertices += 1
def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]:
self.add_vertex(lowercase_ )
self.add_vertex(lowercase_ )
if head == tail:
return
lowercase__ : int = weight
lowercase__ : Any = weight
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : List[Any] = self.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : int = edge
edges.remove((tail, head, weight) )
for i in range(len(lowercase_ ) ):
lowercase__ : Tuple = list(edges[i] )
edges.sort(key=lambda lowercase_ : lowercase_[2] )
for i in range(len(lowercase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowercase__ : int = edges[i][2] + 1
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge
lowercase__ : Union[str, Any] = weight
lowercase__ : Dict = weight
def __str__( self : str ) -> Any:
lowercase__ : str = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase__ : Optional[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Any = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : List[str] ) -> Dict:
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]:
lowercase__ : Any = Graph()
if vertices is None:
lowercase__ : str = []
if edges is None:
lowercase__ : List[Any] = []
for vertex in vertices:
g.add_vertex(lowercase_ )
for edge in edges:
g.add_edge(*lowercase_ )
return g
class snake_case_ :
def __init__( self : int ) -> List[str]:
lowercase__ : Dict = {}
lowercase__ : Tuple = {}
def __len__( self : Union[str, Any] ) -> Union[str, Any]:
return len(self.parent )
def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple:
if item in self.parent:
return self.find(lowercase_ )
lowercase__ : Union[str, Any] = item
lowercase__ : int = 0
return item
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any:
if item not in self.parent:
return self.make_set(lowercase_ )
if item != self.parent[item]:
lowercase__ : Union[str, Any] = self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]:
lowercase__ : Dict = self.find(lowercase_ )
lowercase__ : Optional[int] = self.find(lowercase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowercase__ : Dict = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowercase__ : int = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowercase__ : Tuple = roota
return roota
return None
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]:
lowercase__ : List[Any] = graph.num_vertices
lowercase__ : Optional[Any] = Graph.UnionFind()
lowercase__ : int = []
while num_components > 1:
lowercase__ : List[Any] = {}
for vertex in graph.get_vertices():
lowercase__ : Any = -1
lowercase__ : List[str] = graph.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : List[str] = edge
lowercase__ : List[str] = union_find.find(lowercase_ )
lowercase__ : Union[str, Any] = union_find.find(lowercase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : int = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : Dict = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex]
if union_find.find(lowercase_ ) != union_find.find(lowercase_ ):
union_find.union(lowercase_ , lowercase_ )
mst_edges.append(cheap_edge[vertex] )
lowercase__ : Optional[Any] = num_components - 1
lowercase__ : List[Any] = Graph.build(edges=lowercase_ )
return mst
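# Hedged usage sketch (comment only, since the method names above were mangled by
# this dump; this is how the un-mangled upstream Boruvka code is driven):
#   g = Graph.build(vertices=[1, 2, 3], edges=[(1, 2, 1), (2, 3, 2), (1, 3, 3)])
#   mst = Graph.boruvka_mst(g)  # assumed upstream name of the last staticmethod
#   print(mst)                  # the two cheapest edges span the tree: 1-2 and 2-3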
| 333 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int):
lowercase__ : List[Any] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
lowercase__ : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw).convert("RGB")
lowercase__ : List[str] = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711)),
])
lowercase__ : Optional[int] = transform(_lowerCamelCase).unsqueeze(0).to(_lowerCamelCase)
return image
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
if "visual_encoder" in key:
lowercase__ : int = re.sub("visual_encoder*" , "vision_model.encoder" , _lowerCamelCase)
if "blocks" in key:
lowercase__ : Optional[Any] = re.sub(R"blocks" , "layers" , _lowerCamelCase)
if "attn" in key:
lowercase__ : Optional[Any] = re.sub(R"attn" , "self_attn" , _lowerCamelCase)
if "norm1" in key:
lowercase__ : List[Any] = re.sub(R"norm1" , "layer_norm1" , _lowerCamelCase)
if "norm2" in key:
lowercase__ : int = re.sub(R"norm2" , "layer_norm2" , _lowerCamelCase)
if "encoder.norm" in key:
lowercase__ : Union[str, Any] = re.sub(R"encoder.norm" , "post_layernorm" , _lowerCamelCase)
if "encoder.patch_embed.proj" in key:
lowercase__ : Union[str, Any] = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , _lowerCamelCase)
if "encoder.pos_embed" in key:
lowercase__ : Union[str, Any] = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , _lowerCamelCase)
if "encoder.cls_token" in key:
lowercase__ : Dict = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , _lowerCamelCase)
if "self_attn" in key:
lowercase__ : List[Any] = re.sub(R"self_attn.proj" , "self_attn.projection" , _lowerCamelCase)
return key
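# Hedged worked example of the renaming chain above (traced by hand):
#   "visual_encoder.blocks.0.attn.proj.weight"
#     -> "vision_model.encoder.layers.0.self_attn.projection.weight"
# ("visual_encoder" -> "vision_model.encoder", "blocks" -> "layers",
#  "attn" -> "self_attn", then "self_attn.proj" -> "self_attn.projection").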
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int]=None):
if config_path is not None:
lowercase__ : int = BlipConfig.from_pretrained(_lowerCamelCase)
else:
lowercase__ : Optional[int] = BlipConfig(projection_dim=512 , text_config={} , vision_config={})
lowercase__ : Any = BlipForConditionalGeneration(_lowerCamelCase).eval()
lowercase__ : Tuple = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
lowercase__ : Tuple = blip_decoder(pretrained=_lowerCamelCase , image_size=384 , vit="base")
lowercase__ : Tuple = pt_model.eval()
lowercase__ : Any = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ : Any = modified_state_dict.pop(_lowerCamelCase)
lowercase__ : List[str] = rename_key(_lowerCamelCase)
lowercase__ : str = value
hf_model.load_state_dict(_lowerCamelCase)
lowercase__ : Optional[int] = 384
lowercase__ : Optional[Any] = load_demo_image(image_size=_lowerCamelCase , device="cpu")
lowercase__ : Optional[int] = BertTokenizer.from_pretrained("bert-base-uncased")
lowercase__ : Union[str, Any] = tokenizer(["a picture of"]).input_ids
lowercase__ : List[str] = hf_model.generate(_lowerCamelCase , _lowerCamelCase)
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ : List[Any] = hf_model.generate(_lowerCamelCase)
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(_lowerCamelCase)
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ : Tuple = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
lowercase__ : int = blip_vqa(pretrained=_lowerCamelCase , image_size=_lowerCamelCase , vit="base")
vqa_model.eval()
lowercase__ : Optional[Any] = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ : Dict = modified_state_dict.pop(_lowerCamelCase)
lowercase__ : Tuple = rename_key(_lowerCamelCase)
lowercase__ : int = value
lowercase__ : Any = BlipForQuestionAnswering(_lowerCamelCase)
hf_vqa_model.load_state_dict(_lowerCamelCase)
lowercase__ : List[Any] = ["How many dogs are in this image?"]
lowercase__ : int = tokenizer(_lowerCamelCase , return_tensors="pt").input_ids
lowercase__ : int = hf_vqa_model.generate(_lowerCamelCase , _lowerCamelCase)
print(tokenizer.decode(answer[0]))
assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")
lowercase__ : Union[str, Any] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
lowercase__ : Optional[Any] = blip_itm(pretrained=_lowerCamelCase , image_size=_lowerCamelCase , vit="base")
itm_model.eval()
lowercase__ : Optional[int] = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ : Union[str, Any] = modified_state_dict.pop(_lowerCamelCase)
lowercase__ : Any = rename_key(_lowerCamelCase)
lowercase__ : int = value
lowercase__ : Optional[Any] = BlipForImageTextRetrieval(_lowerCamelCase)
lowercase__ : Tuple = ["A picture of a woman with a dog sitting in a beach"]
lowercase__ : str = tokenizer(
_lowerCamelCase , return_tensors="pt" , padding="max_length" , truncation=_lowerCamelCase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(_lowerCamelCase)
hf_itm_model.eval()
lowercase__ : List[str] = hf_itm_model(_lowerCamelCase , _lowerCamelCase , use_itm_head=_lowerCamelCase)
lowercase__ : Optional[int] = hf_itm_model(_lowerCamelCase , _lowerCamelCase , use_itm_head=_lowerCamelCase)
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0] , dim=1)[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
UpperCamelCase = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 333 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = DPTConfig()
if "large" in checkpoint_url:
lowercase__ : str = 1024
lowercase__ : List[str] = 4096
lowercase__ : List[Any] = 24
lowercase__ : Dict = 16
lowercase__ : Union[str, Any] = [5, 11, 17, 23]
lowercase__ : Any = [256, 512, 1024, 1024]
lowercase__ : Optional[int] = (1, 384, 384)
if "ade" in checkpoint_url:
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = 150
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : str = "ade20k-id2label.json"
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r"))
lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = [1, 150, 480, 480]
return config, expected_shape
def lowercase_ ( _lowerCamelCase : List[Any]):
lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder")
if "pretrained.model" in name:
lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings")
if "patch_embed" in name:
lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings")
if "pos_embed" in name:
lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings")
if "attn.proj" in name:
lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense")
if "proj" in name and "project" not in name:
lowercase__ : int = name.replace("proj" , "projection")
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layer")
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense")
if "mlp.fc2" in name:
lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense")
if "norm1" in name:
lowercase__ : List[str] = name.replace("norm1" , "layernorm_before")
if "norm2" in name:
lowercase__ : Dict = name.replace("norm2" , "layernorm_after")
if "scratch.output_conv" in name:
lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head")
if "scratch" in name:
lowercase__ : str = name.replace("scratch" , "neck")
if "layer1_rn" in name:
lowercase__ : int = name.replace("layer1_rn" , "convs.0")
if "layer2_rn" in name:
lowercase__ : int = name.replace("layer2_rn" , "convs.1")
if "layer3_rn" in name:
lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2")
if "layer4_rn" in name:
lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3")
if "refinenet" in name:
lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''')
if "out_conv" in name:
lowercase__ : str = name.replace("out_conv" , "projection")
if "resConfUnit1" in name:
lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1")
if "resConfUnit2" in name:
lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2")
if "conv1" in name:
lowercase__ : List[Any] = name.replace("conv1" , "convolution1")
if "conv2" in name:
lowercase__ : Tuple = name.replace("conv2" , "convolution2")
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0")
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0")
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0")
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0")
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection")
if "pretrained.act_postprocess1.4" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize")
if "pretrained.act_postprocess2.3" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection")
if "pretrained.act_postprocess2.4" in name:
lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize")
if "pretrained.act_postprocess3.3" in name:
lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection")
if "pretrained.act_postprocess4.3" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection")
if "pretrained.act_postprocess4.4" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize")
if "pretrained" in name:
lowercase__ : Any = name.replace("pretrained" , "dpt")
if "bn" in name:
lowercase__ : str = name.replace("bn" , "batch_norm")
if "head" in name:
lowercase__ : Optional[Any] = name.replace("head" , "head.head")
if "encoder.norm" in name:
lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm")
if "auxlayer" in name:
lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head")
return name
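# Hedged worked example (traced by hand through the branches above):
#   "pretrained.model.blocks.0.mlp.fc1.weight"
#     -> "dpt.encoder.layer.0.intermediate.dense.weight"
# ("pretrained.model" -> "dpt.encoder", "blocks" -> "layer", "mlp.fc1" -> "intermediate.dense").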
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str):
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size]
lowercase__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : int = in_proj_bias[-config.hidden_size :]
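# Hedged note: the loop above splits timm's fused qkv projection, a weight of
# shape (3 * hidden_size, hidden_size), into equal thirds: rows [0:h] become the
# query, rows [h:2h] the key, and rows [2h:3h] the value projection.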
def lowercase_ ( ):
lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict):
lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase)
# load original state_dict from URL
lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu")
# remove certain keys
remove_ignore_keys_(_lowerCamelCase)
# rename keys
for key in state_dict.copy().keys():
lowercase__ : List[str] = state_dict.pop(_lowerCamelCase)
lowercase__ : List[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase)
# load HuggingFace model
lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase)
model.load_state_dict(_lowerCamelCase)
model.eval()
# Check outputs on an image
lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384
lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase)
lowercase__ : List[str] = prepare_img()
lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt")
# forward pass
lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth
# Assert logits
lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
if "ade" in checkpoint_url:
lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
assert outputs.shape == torch.Size(_lowerCamelCase)
assert (
torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase)
)
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
print(f'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowerCamelCase)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowerCamelCase)
if push_to_hub:
print("Pushing model to hub...")
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
UpperCamelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 333 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
__A : str = RobertaTokenizer
__A : Any = RobertaTokenizerFast
__A : List[Any] = True
__A : str = {"cls_token": "<s>"}
def __UpperCamelCase ( self : int ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : List[Any] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowercase__ : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : List[Any] = {"unk_token": "<unk>"}
lowercase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
def __UpperCamelCase ( self : int , **lowercase_ : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def __UpperCamelCase ( self : Tuple , **lowercase_ : str ) -> Dict:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Any ) -> List[str]:
lowercase__ : Optional[Any] = "lower newer"
lowercase__ : Dict = "lower newer"
return input_text, output_text
def __UpperCamelCase ( self : Tuple ) -> List[str]:
lowercase__ : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Optional[int] = "lower newer"
lowercase__ : Dict = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
lowercase__ : Optional[Any] = tokenizer.tokenize(lowercase_ ) # , add_prefix_space=True)
self.assertListEqual(lowercase_ , lowercase_ )
lowercase__ : str = tokens + [tokenizer.unk_token]
lowercase__ : int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowercase_ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowercase_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCamelCase ( self : int ) -> str:
lowercase__ : int = self.tokenizer_class.from_pretrained("roberta-base" )
lowercase__ : Any = tokenizer.encode("sequence builders" , add_special_tokens=lowercase_ )
lowercase__ : Optional[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowercase_ )
lowercase__ : Dict = tokenizer.encode(
"sequence builders" , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase__ : Union[str, Any] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase__ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase_ )
lowercase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Dict = "Encode this sequence."
lowercase__ : Dict = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
lowercase__ : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase__ : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
lowercase__ : Tuple = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase__ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase_ , lowercase_ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
lowercase__ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
lowercase__ : Any = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
# Testing spaces after special tokens
lowercase__ : List[Any] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ )} ) # mask token has a left space
lowercase__ : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
lowercase__ : Dict = "Encode <mask> sequence"
lowercase__ : Any = "Encode <mask>sequence"
lowercase__ : Dict = tokenizer.encode(lowercase_ )
lowercase__ : Any = encoded.index(lowercase_ )
lowercase__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase_ , lowercase_ )
lowercase__ : Optional[Any] = tokenizer.encode(lowercase_ )
lowercase__ : Optional[Any] = encoded.index(lowercase_ )
lowercase__ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Tuple ) -> str:
pass
def __UpperCamelCase ( self : Any ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : int = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : Tuple = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : Union[str, Any] = "A, <mask> AllenNLP sentence."
lowercase__ : Any = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
lowercase__ : Optional[int] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
lowercase__ : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowercase__ : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCamelCase ( self : Tuple ) -> Dict:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase__ : Dict = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : str = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase__ : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowercase_ )
self.assertEqual(post_processor_state["add_prefix_space"] , lowercase_ )
self.assertEqual(post_processor_state["trim_offsets"] , lowercase_ )
def __UpperCamelCase ( self : str ) -> List[Any]:
# Test which aims to verify that the offsets are well adapted to the arguments `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase__ : List[str] = F'''{text_of_1_token} {text_of_1_token}'''
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Optional[Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : List[Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Optional[int] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : str = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : str = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase__ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Dict = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ) + 1, 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : Tuple = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : List[Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : str = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
| 333 | def lowercase_ ( _lowerCamelCase : int = 1 , _lowerCamelCase : int = 1000):
lowercase__ : Union[str, Any] = 1
lowercase__ : int = 0
for divide_by_number in range(_lowerCamelCase , digit + 1):
lowercase__ : list[int] = []
lowercase__ : Dict = numerator
for _ in range(1 , digit + 1):
if now_divide in has_been_divided:
if longest_list_length < len(_lowerCamelCase):
lowercase__ : Union[str, Any] = len(_lowerCamelCase)
lowercase__ : Optional[int] = divide_by_number
else:
has_been_divided.append(_lowerCamelCase)
lowercase__ : Optional[Any] = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
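# Hedged note: with the defaults (numerator=1, digit=1000) the un-mangled
# routine above answers Project Euler problem 26; the d < 1000 whose unit
# fraction 1/d has the longest recurring decimal cycle is d = 983.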
| 333 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''speechbrain/m-ctc-t-large''': '''https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json''',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class snake_case_ ( __A ):
__A : List[Any] = "mctct"
def __init__( self : List[str] , lowercase_ : List[str]=80_65 , lowercase_ : List[Any]=15_36 , lowercase_ : Union[str, Any]=36 , lowercase_ : Tuple=61_44 , lowercase_ : Union[str, Any]=4 , lowercase_ : Optional[Any]=3_84 , lowercase_ : Optional[int]=9_20 , lowercase_ : Dict=1E-5 , lowercase_ : List[str]=0.3 , lowercase_ : Union[str, Any]="relu" , lowercase_ : int=0.02 , lowercase_ : Union[str, Any]=0.3 , lowercase_ : List[Any]=0.3 , lowercase_ : List[str]=1 , lowercase_ : Optional[int]=0 , lowercase_ : List[str]=2 , lowercase_ : Union[str, Any]=1 , lowercase_ : int=0.3 , lowercase_ : List[str]=1 , lowercase_ : Any=(7,) , lowercase_ : Optional[int]=(3,) , lowercase_ : Tuple=80 , lowercase_ : Tuple=1 , lowercase_ : List[Any]=None , lowercase_ : Dict="sum" , lowercase_ : Optional[int]=False , **lowercase_ : str , ) -> Any:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowercase__ : Dict = vocab_size
lowercase__ : str = hidden_size
lowercase__ : List[str] = num_hidden_layers
lowercase__ : Optional[Any] = intermediate_size
lowercase__ : Any = num_attention_heads
lowercase__ : Union[str, Any] = attention_head_dim
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : int = layer_norm_eps
lowercase__ : List[Any] = layerdrop
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : Optional[Any] = initializer_range
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : Dict = attention_probs_dropout_prob
lowercase__ : List[Any] = pad_token_id
lowercase__ : List[Any] = bos_token_id
lowercase__ : Union[str, Any] = eos_token_id
lowercase__ : Tuple = conv_glu_dim
lowercase__ : Any = conv_dropout
lowercase__ : Tuple = num_conv_layers
lowercase__ : Optional[int] = input_feat_per_channel
lowercase__ : Union[str, Any] = input_channels
lowercase__ : List[Any] = conv_channels
lowercase__ : List[Any] = ctc_loss_reduction
lowercase__ : Tuple = ctc_zero_infinity
# prevents config testing fail with exporting to json
lowercase__ : Any = list(lowercase_ )
lowercase__ : List[Any] = list(lowercase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
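# Hedged example of the validation above: the defaults (num_conv_layers=1,
# conv_kernel=(7,)) satisfy the check, while e.g. conv_kernel=(7, 3) with
# num_conv_layers=1 would raise this ValueError at construction time.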
| 333 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ):
__A : int = StableUnCLIPPipeline
__A : int = TEXT_TO_IMAGE_PARAMS
__A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__A : int = False
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : str = 32
lowercase__ : Any = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : Any = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase_ , num_layers=1 , )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowercase__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ )
lowercase__ : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , )
torch.manual_seed(0 )
lowercase__ : Any = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL()
lowercase__ : List[Any] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __UpperCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Dict=0 ) -> Any:
if str(lowercase_ ).startswith("mps" ):
lowercase__ : Any = torch.manual_seed(lowercase_ )
else:
lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase__ : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
lowercase__ : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
lowercase__ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowercase_ )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowercase__ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : Dict = pipe("anime turle" , generator=lowercase_ , output_type="np" )
lowercase__ : Optional[int] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowercase__ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : str = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowercase__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
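# All tokenizer tests below run against the shared tiny SentencePiece fixture above.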
@require_sentencepiece
class snake_case_ ( __A ,unittest.TestCase ):
__A : str = XLMProphetNetTokenizer
__A : Tuple = False
__A : str = True
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ : List[str] = XLMProphetNetTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> Tuple:
lowercase__ : Optional[Any] = "[PAD]"
lowercase__ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
lowercase__ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowercase_ ) , 10_12 )
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 10_12 )
def __UpperCamelCase ( self : Any ) -> List[str]:
lowercase__ : List[str] = XLMProphetNetTokenizer(lowercase_ , keep_accents=lowercase_ )
lowercase__ : int = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
lowercase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def __UpperCamelCase ( self : Optional[Any] ) -> int:
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def __UpperCamelCase ( self : Any ) -> Dict:
lowercase__ : Optional[Any] = "Hello World!"
lowercase__ : Union[str, Any] = [3_53_89, 66_72, 49, 2]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
# fmt: off
lowercase__ : List[Any] = {"input_ids": [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 333 | import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int=False):
try:
lowercase__ : str = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase__ : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
lowercase__ : Union[str, Any] = strtobool(_lowerCamelCase)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''')
return _value
UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
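# Skip decorators: each one wraps unittest.skipUnless around a backend or hardware availability check.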
def lowercase_ ( _lowerCamelCase : int):
return unittest.skip("Test was skipped")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict):
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=None):
if test_case is None:
return partial(_lowerCamelCase , version=_lowerCamelCase)
return unittest.skipUnless(is_torch_version(">=" , _lowerCamelCase) , f'''test requires torch version >= {version}''')(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase)
UpperCamelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase)
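# Base TestCase that creates one temporary directory per test class and optionally empties it before each test.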
class snake_case_ ( unittest.TestCase ):
__A : int = True
@classmethod
def __UpperCamelCase ( cls : str ) -> str:
lowercase__ : str = tempfile.mkdtemp()
@classmethod
def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCamelCase ( self : str ) -> Optional[int]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowercase_ )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str:
lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
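# Gather a tensor across processes and verify that every gathered copy equals the local value.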
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Tuple = AcceleratorState()
lowercase__ : Optional[int] = tensor[None].clone().to(state.device)
lowercase__ : Optional[int] = gather(_lowerCamelCase).cpu()
lowercase__ : Optional[Any] = tensor[0].cpu()
for i in range(tensors.shape[0]):
if not torch.equal(tensors[i] , _lowerCamelCase):
return False
return True
class snake_case_ :
def __init__( self : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int ) -> Union[str, Any]:
lowercase__ : int = returncode
lowercase__ : Dict = stdout
lowercase__ : List[Any] = stderr
async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str):
while True:
lowercase__ : int = await stream.readline()
if line:
callback(_lowerCamelCase)
else:
break
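# Run a subprocess and tee its stdout/stderr line by line, so output stays visible even if the process hangs.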
async def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=False):
if echo:
print("\nRunning: " , " ".join(_lowerCamelCase))
lowercase__ : str = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase__ : Tuple = []
lowercase__ : List[Any] = []
def tee(_lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=""):
lowercase__ : Optional[int] = line.decode("utf-8").rstrip()
sink.append(_lowerCamelCase)
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:"))),
asyncio.create_task(_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:"))),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase)
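# Synchronous wrapper around the async runner: executes the command on the event loop and raises on a non-zero return code.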
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=180 , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=True):
lowercase__ : Optional[Any] = asyncio.get_event_loop()
lowercase__ : List[Any] = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase))
lowercase__ : str = " ".join(_lowerCamelCase)
if result.returncode > 0:
lowercase__ : Dict = "\n".join(result.stderr)
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''')
return result
class snake_case_ ( __A ):
pass
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any=False):
try:
lowercase__ : Optional[int] = subprocess.check_output(_lowerCamelCase , stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(_lowerCamelCase , "decode"):
lowercase__ : Optional[Any] = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{" ".join(_lowerCamelCase)}` failed with the following error:\n\n{e.output.decode()}''') from e
| 333 | 1 |
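# Cocktail shaker sort: a bidirectional bubble sort; each pass bubbles small values toward the front, then large values toward the back.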
def lowercase_ ( _lowerCamelCase : list):
for i in range(len(_lowerCamelCase) - 1 , 0 , -1):
lowercase__ : int = False
for j in range(_lowerCamelCase , 0 , -1):
if unsorted[j] < unsorted[j - 1]:
lowercase__ , lowercase__ : int = unsorted[j - 1], unsorted[j]
lowercase__ : List[str] = True
for j in range(_lowerCamelCase):
if unsorted[j] > unsorted[j + 1]:
lowercase__ , lowercase__ : Optional[int] = unsorted[j + 1], unsorted[j]
lowercase__ : Dict = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | from ..utils import DummyObject, requires_backends
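# Import-guard stubs: every class here raises via requires_backends when Flax is not installed.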
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : int , **lowercase_ : List[str] ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Any:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Any ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Dict , *lowercase_ : str , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ) -> List[str]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Any ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Tuple:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Dict:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> int:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[Any] = ["flax"]
def __init__( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : int ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Any , *lowercase_ : int , **lowercase_ : int ) -> Optional[int]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : str ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[int] = ["flax"]
def __init__( self : Any , *lowercase_ : str , **lowercase_ : Dict ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : int ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[str] = ["flax"]
def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
| 333 | 1 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
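# Remap the original diffuser temporal-UNet / value-function checkpoints into the diffusers UNet layout and save weights + config.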
def lowercase_ ( _lowerCamelCase : Optional[int]):
if hor == 128:
lowercase__ : Dict = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
lowercase__ : Union[str, Any] = (32, 128, 256)
lowercase__ : Dict = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
lowercase__ : Tuple = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
lowercase__ : Union[str, Any] = (32, 64, 128, 256)
lowercase__ : Union[str, Any] = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
lowercase__ : Union[str, Any] = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''')
lowercase__ : Union[str, Any] = model.state_dict()
lowercase__ : List[Any] = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_5536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
lowercase__ : Tuple = UNetaDModel(**_lowerCamelCase)
print(f'''length of state dict: {len(state_dict.keys())}''')
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys())}''')
lowercase__ : List[str] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys()))
for k, v in mapping.items():
lowercase__ : Dict = state_dict.pop(_lowerCamelCase)
hf_value_function.load_state_dict(_lowerCamelCase)
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''')
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , "w") as f:
json.dump(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( ):
lowercase__ : List[str] = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_5536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
lowercase__ : List[str] = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
lowercase__ : str = model
lowercase__ : Optional[int] = UNetaDModel(**_lowerCamelCase)
print(f'''length of state dict: {len(state_dict.keys())}''')
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys())}''')
lowercase__ : Optional[Any] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys()))
for k, v in mapping.items():
lowercase__ : Any = state_dict.pop(_lowerCamelCase)
hf_value_function.load_state_dict(_lowerCamelCase)
torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
with open("hub/hopper-medium-v2/value_function/config.json" , "w") as f:
json.dump(_lowerCamelCase , _lowerCamelCase)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 333 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case_ ( __A ):
__A : List[str] = "vit_mae"
def __init__( self : List[Any] , lowercase_ : List[Any]=7_68 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Optional[Any]=30_72 , lowercase_ : str="gelu" , lowercase_ : Tuple=0.0 , lowercase_ : int=0.0 , lowercase_ : Dict=0.02 , lowercase_ : int=1E-12 , lowercase_ : Tuple=2_24 , lowercase_ : Any=16 , lowercase_ : Dict=3 , lowercase_ : List[Any]=True , lowercase_ : Dict=16 , lowercase_ : List[str]=5_12 , lowercase_ : Tuple=8 , lowercase_ : Any=20_48 , lowercase_ : int=0.75 , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ) -> Optional[Any]:
super().__init__(**lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : str = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : List[Any] = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Any = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[int] = patch_size
lowercase__ : Any = num_channels
lowercase__ : str = qkv_bias
lowercase__ : Optional[Any] = decoder_num_attention_heads
lowercase__ : Any = decoder_hidden_size
lowercase__ : Any = decoder_num_hidden_layers
lowercase__ : Union[str, Any] = decoder_intermediate_size
lowercase__ : int = mask_ratio
lowercase__ : Tuple = norm_pix_loss
| 333 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
class snake_case_ ( __A ,__A ):
__A : Optional[Any] = "maskformer-swin"
__A : Optional[int] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , lowercase_ : Any=2_24 , lowercase_ : Optional[Any]=4 , lowercase_ : Optional[int]=3 , lowercase_ : Dict=96 , lowercase_ : Optional[int]=[2, 2, 6, 2] , lowercase_ : int=[3, 6, 12, 24] , lowercase_ : Optional[Any]=7 , lowercase_ : Any=4.0 , lowercase_ : Tuple=True , lowercase_ : List[Any]=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : Any=False , lowercase_ : List[Any]=0.02 , lowercase_ : Optional[Any]=1E-5 , lowercase_ : str=None , lowercase_ : Dict=None , **lowercase_ : List[Any] , ) -> str:
super().__init__(**lowercase_ )
lowercase__ : List[Any] = image_size
lowercase__ : List[str] = patch_size
lowercase__ : Dict = num_channels
lowercase__ : List[Any] = embed_dim
lowercase__ : str = depths
lowercase__ : List[str] = len(lowercase_ )
lowercase__ : str = num_heads
lowercase__ : int = window_size
lowercase__ : Optional[Any] = mlp_ratio
lowercase__ : Union[str, Any] = qkv_bias
lowercase__ : Any = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : str = drop_path_rate
lowercase__ : Optional[Any] = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : Dict = layer_norm_eps
lowercase__ : Tuple = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase__ : str = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowercase__ : Dict = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )]
lowercase__ , lowercase__ : List[str] = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
| 333 | def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
while a != 0:
lowercase__ , lowercase__ : Dict = b % a, a
return b
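# Modular inverse via the extended Euclidean algorithm; it exists only when gcd(a, m) == 1.
# For example, a=3 and m=10 give 7, since 3 * 7 % 10 == 1.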
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
if gcd(_lowerCamelCase , _lowerCamelCase) != 1:
lowercase__ : Tuple = f'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(_lowerCamelCase)
lowercase__ , lowercase__ , lowercase__ : Optional[int] = 1, 0, a
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = 0, 1, m
while va != 0:
lowercase__ : Tuple = ua // va
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 333 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
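# Helpers to find the latest completed scheduled CI run and to download and unpack its artifacts.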
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any]=7):
lowercase__ : Any = None
if token is not None:
lowercase__ : List[str] = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''}
# The id of a workflow (not of a workflow run)
lowercase__ : Tuple = "636036"
lowercase__ : int = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
lowercase__ : List[str] = requests.get(_lowerCamelCase , headers=_lowerCamelCase).json()
return result["workflow_runs"]
def lowercase_ ( _lowerCamelCase : Tuple):
lowercase__ : Tuple = get_daily_ci_runs(_lowerCamelCase)
lowercase__ : List[str] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ : List[str] = workflow_run["id"]
break
return workflow_run_id
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Tuple):
lowercase__ : Tuple = get_last_daily_ci_runs(_lowerCamelCase)
if workflow_run_id is not None:
lowercase__ : Optional[Any] = get_artifacts_links(worflow_run_id=_lowerCamelCase , token=_lowerCamelCase)
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ : List[Any] = artifacts_links[artifact_name]
download_artifact(
artifact_name=_lowerCamelCase , artifact_url=_lowerCamelCase , output_dir=_lowerCamelCase , token=_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : str):
get_last_daily_ci_artifacts(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
lowercase__ : Optional[int] = {}
for artifact_name in artifact_names:
lowercase__ : str = os.path.join(_lowerCamelCase , f'''{artifact_name}.zip''')
if os.path.isfile(_lowerCamelCase):
lowercase__ : Optional[int] = {}
with zipfile.ZipFile(_lowerCamelCase) as z:
for filename in z.namelist():
if not os.path.isdir(_lowerCamelCase):
# read the file
with z.open(_lowerCamelCase) as f:
lowercase__ : Dict = f.read().decode("UTF-8")
return results
| 333 | import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
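# Split each document into passages of ~n words so every DPR embedding covers a short span of text.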
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Tuple=100 , _lowerCamelCase : Tuple=" "):
lowercase__ : Union[str, Any] = text.split(_lowerCamelCase)
return [character.join(text[i : i + n]).strip() for i in range(0 , len(_lowerCamelCase) , _lowerCamelCase)]
def lowercase_ ( _lowerCamelCase : dict):
lowercase__ , lowercase__ : List[str] = [], []
for title, text in zip(documents["title"] , documents["text"]):
if text is not None:
for passage in split_text(_lowerCamelCase):
titles.append(title if title is not None else "")
texts.append(_lowerCamelCase)
return {"title": titles, "text": texts}
def lowercase_ ( _lowerCamelCase : dict , _lowerCamelCase : DPRContextEncoder , _lowerCamelCase : DPRContextEncoderTokenizerFast):
lowercase__ : Union[str, Any] = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=_lowerCamelCase , padding="longest" , return_tensors="pt")["input_ids"]
lowercase__ : Any = ctx_encoder(input_ids.to(device=_lowerCamelCase) , return_dict=_lowerCamelCase).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
######################################
logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase__ : str = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase__ : List[Any] = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase__ : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=_lowerCamelCase)
lowercase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase__ : List[Any] = Features(
{"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}) # optional, save as float32 instead of float64 to save space
lowercase__ : List[Any] = dataset.map(
partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , )
# And finally save your dataset
lowercase__ : Optional[int] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset")
dataset.save_to_disk(_lowerCamelCase)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index("embeddings" , custom_index=_lowerCamelCase)
# And save the index
lowercase__ : Union[str, Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss")
dataset.get_index("embeddings").save(_lowerCamelCase)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class snake_case_ :
__A : str = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,)
__A : Optional[str] = field(
default=__A ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,)
__A : str = field(
default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,)
__A : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} ,)
__A : Optional[str] = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,)
@dataclass
class snake_case_ :
__A : Optional[int] = field(
default=__A ,metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} ,)
__A : int = field(
default=16 ,metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} ,)
@dataclass
class snake_case_ :
__A : int = field(
default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,)
__A : int = field(
default=128 ,metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 333 | 1 |
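# Rail-fence (zigzag) transposition cipher: encrypt, decrypt, and a brute-force helper that tries every key height.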
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : int):
lowercase__ : list[list[str]] = [[] for _ in range(_lowerCamelCase)]
lowercase__ : Optional[Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative")
if key == 1 or len(_lowerCamelCase) <= key:
return input_string
for position, character in enumerate(_lowerCamelCase):
lowercase__ : Optional[int] = position % (lowest * 2) # puts it in bounds
lowercase__ : Optional[int] = min(_lowerCamelCase , lowest * 2 - num) # creates zigzag pattern
temp_grid[num].append(_lowerCamelCase)
lowercase__ : Dict = ["".join(_lowerCamelCase) for row in temp_grid]
lowercase__ : Tuple = "".join(_lowerCamelCase)
return output_string
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : int):
lowercase__ : Optional[int] = []
lowercase__ : Any = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative")
if key == 1:
return input_string
lowercase__ : list[list[str]] = [[] for _ in range(_lowerCamelCase)] # generates template
for position in range(len(_lowerCamelCase)):
lowercase__ : Optional[int] = position % (lowest * 2) # puts it in bounds
lowercase__ : Optional[int] = min(_lowerCamelCase , lowest * 2 - num) # creates zigzag pattern
temp_grid[num].append("*")
lowercase__ : Optional[Any] = 0
for row in temp_grid: # fills in the characters
lowercase__ : str = input_string[counter : counter + len(_lowerCamelCase)]
grid.append(list(_lowerCamelCase))
counter += len(_lowerCamelCase)
lowercase__ : Optional[int] = "" # reads as zigzag
for position in range(len(_lowerCamelCase)):
lowercase__ : int = position % (lowest * 2) # puts it in bounds
lowercase__ : List[Any] = min(_lowerCamelCase , lowest * 2 - num) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0)
return output_string
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Tuple = {}
for key_guess in range(1 , len(_lowerCamelCase)): # tries every key
lowercase__ : int = decrypt(_lowerCamelCase , _lowerCamelCase)
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | import argparse
import datetime
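# Zeller's congruence: derive the weekday for a date string in mm-dd-yyyy or mm/dd/yyyy format.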
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = {
"0": "Sunday",
"1": "Monday",
"2": "Tuesday",
"3": "Wednesday",
"4": "Thursday",
"5": "Friday",
"6": "Saturday",
}
lowercase__ : Any = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
    if len(_lowerCamelCase) != 10:
        raise ValueError("Date must be exactly 10 characters long (mm-dd-yyyy or mm/dd/yyyy)")
# Get month
lowercase__ : int = int(date_input[0] + date_input[1])
# Validate
if not 0 < m < 13:
raise ValueError("Month must be between 1 - 12")
lowercase__ : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get day
lowercase__ : int = int(date_input[3] + date_input[4])
# Validate
if not 0 < d < 32:
raise ValueError("Date must be between 1 - 31")
# Get second separator
lowercase__ : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get year
lowercase__ : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"Year out of range. There has to be some sort of limit...right?")
# Get datetime obj for validation
lowercase__ : Union[str, Any] = datetime.date(int(_lowerCamelCase) , int(_lowerCamelCase) , int(_lowerCamelCase))
# Start math
if m <= 2:
lowercase__ : Optional[Any] = y - 1
lowercase__ : int = m + 12
# maths var
lowercase__ : int = int(str(_lowerCamelCase)[:2])
lowercase__ : int = int(str(_lowerCamelCase)[2:])
lowercase__ : int = int(2.6 * m - 5.39)
lowercase__ : int = int(c / 4)
lowercase__ : int = int(k / 4)
lowercase__ : int = int(d + k)
lowercase__ : int = int(t + u + v + x)
lowercase__ : int = int(z - (2 * c))
lowercase__ : int = round(w % 7)
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("The date was evaluated incorrectly. Contact developer.")
# Response
lowercase__ : str = f'''Your date {date_input}, is a {days[str(_lowerCamelCase)]}!'''
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
UpperCamelCase = parser.parse_args()
zeller(args.date_input)
| 333 | 1 |
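# Project Euler 26: find the denominator up to 'digit' whose unit fraction 1/d has the longest recurring decimal cycle.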
def lowercase_ ( _lowerCamelCase : int = 1 , _lowerCamelCase : int = 1000):
lowercase__ : Union[str, Any] = 1
lowercase__ : int = 0
for divide_by_number in range(_lowerCamelCase , digit + 1):
lowercase__ : list[int] = []
lowercase__ : Dict = numerator
for _ in range(1 , digit + 1):
if now_divide in has_been_divided:
if longest_list_length < len(_lowerCamelCase):
lowercase__ : Union[str, Any] = len(_lowerCamelCase)
lowercase__ : Optional[int] = divide_by_number
else:
has_been_divided.append(_lowerCamelCase)
lowercase__ : Optional[Any] = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
UpperCamelCase = 4
UpperCamelCase = 3
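# Each rank must receive full_size // world_size items, plus one extra item on the first full_size % world_size ranks.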
class snake_case_ ( __A ):
pass
def lowercase_ ( _lowerCamelCase : List[str]):
for shard in shards:
for i in range(_lowerCamelCase):
yield {"i": i, "shard": shard}
def lowercase_ ( ):
lowercase__ : List[str] = int(os.environ["RANK"])
lowercase__ : Union[str, Any] = int(os.environ["WORLD_SIZE"])
lowercase__ : Union[str, Any] = ArgumentParser()
parser.add_argument("--streaming" , type=_lowerCamelCase)
parser.add_argument("--local_rank" , type=_lowerCamelCase)
parser.add_argument("--num_workers" , type=_lowerCamelCase , default=0)
lowercase__ : int = parser.parse_args()
lowercase__ : Union[str, Any] = args.streaming
lowercase__ : List[Any] = args.num_workers
lowercase__ : Dict = {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(_lowerCamelCase)]}
lowercase__ : int = IterableDataset.from_generator(_lowerCamelCase , gen_kwargs=_lowerCamelCase)
if not streaming:
lowercase__ : str = Dataset.from_list(list(_lowerCamelCase))
lowercase__ : List[str] = split_dataset_by_node(_lowerCamelCase , rank=_lowerCamelCase , world_size=_lowerCamelCase)
lowercase__ : Any = torch.utils.data.DataLoader(_lowerCamelCase , num_workers=_lowerCamelCase)
lowercase__ : Dict = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase__ : Any = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
lowercase__ : List[str] = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''')
if __name__ == "__main__":
main()
| 333 | 1 |
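# Notebook bootstrap cell for the Italian docs build; the installation text is intentionally in Italian.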
UpperCamelCase = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
UpperCamelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
UpperCamelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 333 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case_ ( __A ):
__A : List[str] = "unispeech"
def __init__( self : List[Any] , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=1E-5 , lowercase_ : int="group" , lowercase_ : Tuple="gelu" , lowercase_ : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : int=False , lowercase_ : List[Any]=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Any=2 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=3_20 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=1_00 , lowercase_ : Dict=2_56 , lowercase_ : Optional[Any]=2_56 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=2_56 , lowercase_ : Union[str, Any]=80 , lowercase_ : int=0 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.5 , **lowercase_ : Union[str, Any] , ) -> Any:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : Any = feat_extract_norm
lowercase__ : Optional[Any] = feat_extract_activation
lowercase__ : Dict = list(lowercase_ )
lowercase__ : Union[str, Any] = list(lowercase_ )
lowercase__ : List[str] = list(lowercase_ )
lowercase__ : List[str] = conv_bias
lowercase__ : Any = num_conv_pos_embeddings
lowercase__ : Dict = num_conv_pos_embedding_groups
lowercase__ : int = len(self.conv_dim )
lowercase__ : str = num_hidden_layers
lowercase__ : Any = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : int = num_attention_heads
lowercase__ : Union[str, Any] = hidden_dropout
lowercase__ : Any = attention_dropout
lowercase__ : Union[str, Any] = activation_dropout
lowercase__ : Any = feat_proj_dropout
lowercase__ : str = final_dropout
lowercase__ : int = layerdrop
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : Any = num_ctc_classes
lowercase__ : int = vocab_size
lowercase__ : str = do_stable_layer_norm
lowercase__ : Any = use_weighted_layer_sum
lowercase__ : Dict = classifier_proj_size
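        # The three convolutional specs (dim, stride, kernel) must all describe the same number of layers.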
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : List[Any] = apply_spec_augment
lowercase__ : Dict = mask_time_prob
lowercase__ : Tuple = mask_time_length
lowercase__ : str = mask_time_min_masks
lowercase__ : List[Any] = mask_feature_prob
lowercase__ : int = mask_feature_length
lowercase__ : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ : Optional[int] = num_codevectors_per_group
lowercase__ : List[str] = num_codevector_groups
lowercase__ : Dict = contrastive_logits_temperature
lowercase__ : Tuple = feat_quantizer_dropout
lowercase__ : Any = num_negatives
lowercase__ : Dict = codevector_dim
lowercase__ : Tuple = proj_codevector_dim
lowercase__ : List[str] = diversity_loss_weight
# ctc loss
lowercase__ : Tuple = ctc_loss_reduction
lowercase__ : Dict = ctc_zero_infinity
# pretraining loss
lowercase__ : Optional[Any] = replace_prob
@property
def __UpperCamelCase ( self : Dict ) -> Tuple:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 333 | 1 |
# Lint as: python3
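# Naming helpers: convert dataset names between camelCase and snake_case and build shard filename patterns.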
import itertools
import os
import re
UpperCamelCase = re.compile(R'''([A-Z]+)([A-Z][a-z])''')
UpperCamelCase = re.compile(R'''([a-z\d])([A-Z])''')
UpperCamelCase = re.compile(R'''(?<!_)_(?!_)''')
UpperCamelCase = re.compile(R'''(_{2,})''')
UpperCamelCase = R'''^\w+(\.\w+)*$'''
UpperCamelCase = R'''<>:/\|?*'''
def lowercase_ ( _lowerCamelCase : List[str]):
lowercase__ : Optional[Any] = _uppercase_uppercase_re.sub(R"\1_\2" , _lowerCamelCase)
lowercase__ : Union[str, Any] = _lowercase_uppercase_re.sub(R"\1_\2" , _lowerCamelCase)
return name.lower()
def lowercase_ ( _lowerCamelCase : Dict):
lowercase__ : Tuple = _single_underscore_re.split(_lowerCamelCase)
    lowercase__ : str = [_multiple_underscores_re.split(n) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(_lowerCamelCase) if n != "")
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
if os.path.basename(_lowerCamelCase) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''')
return camelcase_to_snakecase(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any]):
if os.path.basename(_lowerCamelCase) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''')
if not re.match(_split_re , _lowerCamelCase):
        raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''')
return f'''{filename_prefix_for_name(_lowerCamelCase)}-{split}'''
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : str=None):
lowercase__ : int = filename_prefix_for_split(_lowerCamelCase , _lowerCamelCase)
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
lowercase__ : Tuple = os.path.join(_lowerCamelCase , _lowerCamelCase)
return f'''{filepath}*'''
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : str=None , _lowerCamelCase : Dict=None):
lowercase__ : Any = filename_prefix_for_split(_lowerCamelCase , _lowerCamelCase)
lowercase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase)
if shard_lengths:
lowercase__ : int = len(_lowerCamelCase)
lowercase__ : Any = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(_lowerCamelCase)]
if filetype_suffix:
lowercase__ : Optional[Any] = [filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
lowercase__ : str = prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
| 333 | def lowercase_ ( _lowerCamelCase : list):
for i in range(len(_lowerCamelCase) - 1 , 0 , -1):
lowercase__ : int = False
        for j in range(i , 0 , -1):
if unsorted[j] < unsorted[j - 1]:
lowercase__ , lowercase__ : int = unsorted[j - 1], unsorted[j]
lowercase__ : List[str] = True
        for j in range(i):
if unsorted[j] > unsorted[j + 1]:
lowercase__ , lowercase__ : Optional[int] = unsorted[j + 1], unsorted[j]
lowercase__ : Dict = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | 1 |
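The naming helpers in the sample above turn a CamelCase dataset name into snake_case before building shard file patterns. A quick re-run of the same two-regex pipeline under readable stand-in names (the obfuscated helpers are assumed to behave like this):
import re

_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

def camelcase_to_snakecase(name: str) -> str:
    # Insert an underscore at each case boundary, then lowercase everything.
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()

print(camelcase_to_snakecase("SQuADDataset"))  # s_qu_ad_dataset
print(camelcase_to_snakecase("Wiki40b"))       # wiki40b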
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
lowercase__ : Dict = [[1, 2, 4], [1, 2, 3, 4]]
lowercase__ : Optional[int] = DisjunctiveConstraint(lowercase_ )
self.assertTrue(isinstance(dc.token_ids , lowercase_ ) )
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __UpperCamelCase ( self : Dict ) -> int:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
lowercase__ : Dict = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowercase_ ):
DisjunctiveConstraint(lowercase_ ) # fails here
def __UpperCamelCase ( self : Tuple ) -> Any:
lowercase__ : Tuple = [[1, 2, 3], [1, 2, 4]]
lowercase__ : str = DisjunctiveConstraint(lowercase_ )
lowercase__ , lowercase__ , lowercase__ : Tuple = dc.update(1 )
lowercase__ : Any = stepped is True and completed is False and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase__ , lowercase__ , lowercase__ : Tuple = dc.update(2 )
lowercase__ : Union[str, Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase__ , lowercase__ , lowercase__ : List[str] = dc.update(3 )
lowercase__ : Optional[Any] = stepped is True and completed is True and reset is False
self.assertTrue(lowercase_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
lowercase__ : List[Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowercase__ : Dict = DisjunctiveConstraint(lowercase_ )
lowercase__ , lowercase__ , lowercase__ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase__ , lowercase__ , lowercase__ : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase__ , lowercase__ , lowercase__ : int = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowercase__ , lowercase__ , lowercase__ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowercase__ , lowercase__ , lowercase__ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase__ , lowercase__ , lowercase__ : Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 333 | import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class snake_case_ ( __A ):
__A : int = "token-classification"
def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]:
if type(lowercase_ ) == dict:
lowercase__ : Dict = Namespace(**lowercase_ )
lowercase__ : str = import_module("tasks" )
try:
lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
lowercase__ : int = CrossEntropyLoss().ignore_index
super().__init__(lowercase_ , len(self.labels ) , self.mode )
def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any:
return self.model(**lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : Tuple = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don't use token_type_ids
lowercase__ : Optional[int] = self(**lowercase_ )
lowercase__ : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
lowercase__ : Tuple = self.hparams
for mode in ["train", "dev", "test"]:
lowercase__ : Any = self._feature_file(lowercase_ )
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ )
lowercase__ : Dict = self.token_classification_task.convert_examples_to_features(
lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , lowercase_ )
torch.save(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader:
lowercase__ : str = self._feature_file(lowercase_ )
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
lowercase__ : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK (we will stop using this soon)
lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str:
"""Compute validation""" ""
lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : int = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don't use token_type_ids
lowercase__ : List[Any] = self(**lowercase_ )
lowercase__ , lowercase__ : Any = outputs[:2]
lowercase__ : Optional[Any] = logits.detach().cpu().numpy()
lowercase__ : int = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]:
lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean()
lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 )
lowercase__ : Dict = np.argmax(lowercase_ , axis=2 )
lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowercase__ : Any = dict(enumerate(self.labels ) )
lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase__ : Any = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
"precision": precision_score(lowercase_ , lowercase_ ),
"recall": recall_score(lowercase_ , lowercase_ ),
"f1": fa_score(lowercase_ , lowercase_ ),
}
lowercase__ : List[Any] = dict(results.items() )
lowercase__ : List[str] = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict:
# when stable
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
lowercase__ : Any = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int:
# updating to test_epoch_end instead of deprecated test_end
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase__ : Optional[int] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple:
# Add NER specific options
BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
parser.add_argument(
"--task_type" , default="NER" , type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=lowercase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase = parser.parse_args()
UpperCamelCase = NERTransformer(args)
UpperCamelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCamelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 333 | 1 |
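The subset restriction exercised in the DisjunctiveConstraint tests above is easiest to see if you picture the candidate sequences as a trie: a branch that ends while its siblings continue makes "completed" ambiguous. A toy sketch of that check (illustrative only, not the transformers implementation):
def build_trie(sequences):
    # Nested dicts keyed by token id; "_end" marks where a candidate stops.
    trie = {}
    for seq in sequences:
        node = trie
        for token in seq:
            node = node.setdefault(token, {})
        node["_end"] = True
    return trie

def has_prefix_conflict(node):
    # True when a candidate ends at a node that still has continuations,
    # i.e. one candidate sequence is a strict prefix of another.
    children = [key for key in node if key != "_end"]
    if "_end" in node and children:
        return True
    return any(has_prefix_conflict(node[child]) for child in children)

print(has_prefix_conflict(build_trie([[1, 2, 4], [1, 2, 3, 4]])))  # False -> allowed
print(has_prefix_conflict(build_trie([[1, 2], [1, 2, 3, 4]])))     # True -> rejected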
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 333 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 333 | 1 |
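Both `__init__` modules above defer heavy imports by registering an `_import_structure` dict and handing the module over to `_LazyModule`. A stripped-down sketch of the same idea via PEP 562 module-level `__getattr__` (package and attribute names here are hypothetical, and this is not the transformers class itself):
# hypothetical mypackage/__init__.py
import importlib

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],  # pretend this submodule is expensive to import
}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Only runs when `name` is not found normally; import lazily, then cache
    # the attribute in module globals so this hook is skipped next time.
    if name in _attr_to_module:
        submodule = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        value = getattr(submodule, name)
        globals()[name] = value
        return value
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")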
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class snake_case_ ( unittest.TestCase ,__A ):
def __UpperCamelCase ( self : Tuple ) -> Any:
lowercase__ : Union[str, Any] = load_tool("text-classification" )
self.tool.setup()
lowercase__ : List[str] = load_tool("text-classification" , remote=lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
lowercase__ : List[str] = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(lowercase_ , "positive" )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
lowercase__ : Union[str, Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(lowercase_ , "positive" )
def __UpperCamelCase ( self : str ) -> str:
lowercase__ : int = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(lowercase_ , "positive" )
def __UpperCamelCase ( self : List[str] ) -> str:
lowercase__ : Optional[int] = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(lowercase_ , "positive" )
| 333 | # Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowercase_ ( _lowerCamelCase : List[str]):
return 1 / (1 + np.exp(-z))
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
return (-y * np.log(_lowerCamelCase) - (1 - y) * np.log(1 - h)).mean()
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple):
lowercase__ : Union[str, Any] = np.dot(_lowerCamelCase , _lowerCamelCase)
return np.sum(y * scores - np.log(1 + np.exp(_lowerCamelCase)))
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str=7_0000):
lowercase__ : Optional[int] = np.zeros(x.shape[1])
for iterations in range(_lowerCamelCase):
lowercase__ : Union[str, Any] = np.dot(_lowerCamelCase , _lowerCamelCase)
lowercase__ : Tuple = sigmoid_function(_lowerCamelCase)
lowercase__ : Dict = np.dot(x.T , h - y) / y.size
lowercase__ : int = theta - alpha * gradient # updating the weights
lowercase__ : List[str] = np.dot(_lowerCamelCase , _lowerCamelCase)
lowercase__ : Union[str, Any] = sigmoid_function(_lowerCamelCase)
lowercase__ : Optional[Any] = cost_function(_lowerCamelCase , _lowerCamelCase)
if iterations % 100 == 0:
print(f'''loss: {j} \t''') # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
UpperCamelCase = datasets.load_iris()
UpperCamelCase = iris.data[:, :2]
UpperCamelCase = (iris.target != 0) * 1
UpperCamelCase = 0.1
UpperCamelCase = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def lowercase_ ( _lowerCamelCase : List[Any]):
return sigmoid_function(
np.dot(_lowerCamelCase , _lowerCamelCase)) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((UpperCamelCase) , (UpperCamelCase)) = (x[:, 0].min(), x[:, 0].max())
((UpperCamelCase) , (UpperCamelCase)) = (x[:, 1].min(), x[:, 1].max())
((UpperCamelCase) , (UpperCamelCase)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
UpperCamelCase = np.c_[xxa.ravel(), xxa.ravel()]
UpperCamelCase = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 333 | 1 |
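The training loop in the logistic-regression sample above is plain batch gradient descent on the log-loss: h = sigmoid(x @ theta), gradient = x.T @ (h - y) / n, theta -= alpha * gradient. One hand-checkable step on a two-point toy problem, assuming the same sigmoid definition:
import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

x = np.array([[0.0], [1.0]])           # two samples, one feature
y = np.array([0.0, 1.0])
theta = np.zeros(1)

h = sigmoid(x @ theta)                 # [0.5, 0.5] when theta == 0
gradient = x.T @ (h - y) / y.size      # [-0.25]
theta = theta - 0.1 * gradient         # one descent step
print(theta)                           # [0.025] -- theta moves toward the data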
from math import factorial, pi
def lowercase_ ( _lowerCamelCase : float , _lowerCamelCase : int = 30):
if not isinstance(_lowerCamelCase , (int, float)):
raise ValueError("maclaurin_sin() requires either an int or float for theta")
if not isinstance(_lowerCamelCase , _lowerCamelCase) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy")
lowercase__ : Optional[int] = float(_lowerCamelCase)
lowercase__ : List[str] = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(_lowerCamelCase))
def lowercase_ ( _lowerCamelCase : float , _lowerCamelCase : int = 30):
if not isinstance(_lowerCamelCase , (int, float)):
raise ValueError("maclaurin_cos() requires either an int or float for theta")
if not isinstance(_lowerCamelCase , _lowerCamelCase) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy")
lowercase__ : Optional[Any] = float(_lowerCamelCase)
lowercase__ : List[str] = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(_lowerCamelCase))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 333 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__A )
class snake_case_ ( __A ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__A : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__A : ClassVar[Features] = Features({"text": Value("string" )} )
__A : ClassVar[Features] = Features({"labels": ClassLabel} )
__A : str = "text"
__A : str = "labels"
def __UpperCamelCase ( self : Dict , lowercase_ : Optional[Any] ) -> int:
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
lowercase__ : Optional[int] = copy.deepcopy(self )
lowercase__ : Tuple = self.label_schema.copy()
lowercase__ : Union[str, Any] = features[self.label_column]
lowercase__ : int = label_schema
return task_template
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| 333 | 1 |
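The Maclaurin helpers above evaluate sin(theta) = sum over r >= 0 of (-1)^r * theta^(2r+1) / (2r+1)! after first reducing theta modulo 2*pi so the truncated series stays well-conditioned. A quick sanity check of the same partial sum against math.sin:
import math

def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    theta -= 2 * (theta // (2 * math.pi)) * math.pi  # reduce into [0, 2*pi)
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / math.factorial(2 * r + 1)
        for r in range(accuracy)
    )

print(maclaurin_sin(10.0))  # ~ -0.5440211108893698
print(math.sin(10.0))       # agrees to double precision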
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class snake_case_ ( __A ):
__A : Dict = "altclip_text_model"
def __init__( self : Optional[int] , lowercase_ : List[Any]=25_00_02 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=24 , lowercase_ : Dict=16 , lowercase_ : List[str]=40_96 , lowercase_ : Any="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[Any]=5_14 , lowercase_ : Optional[Any]=1 , lowercase_ : Tuple=0.02 , lowercase_ : Optional[Any]=0.02 , lowercase_ : Optional[Any]=1E-05 , lowercase_ : Optional[Any]=1 , lowercase_ : List[str]=0 , lowercase_ : Tuple=2 , lowercase_ : str="absolute" , lowercase_ : Tuple=True , lowercase_ : List[Any]=7_68 , **lowercase_ : int , ) -> Union[str, Any]:
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
lowercase__ : Optional[int] = vocab_size
lowercase__ : str = hidden_size
lowercase__ : str = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : Tuple = hidden_act
lowercase__ : Optional[int] = intermediate_size
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : int = attention_probs_dropout_prob
lowercase__ : Optional[int] = max_position_embeddings
lowercase__ : Optional[Any] = type_vocab_size
lowercase__ : Dict = initializer_range
lowercase__ : Tuple = initializer_factor
lowercase__ : List[str] = layer_norm_eps
lowercase__ : int = position_embedding_type
lowercase__ : str = use_cache
lowercase__ : Any = project_dim
class snake_case_ ( __A ):
__A : Any = "altclip_vision_model"
def __init__( self : Optional[int] , lowercase_ : Any=7_68 , lowercase_ : str=30_72 , lowercase_ : Dict=5_12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Tuple=12 , lowercase_ : Optional[int]=3 , lowercase_ : Optional[int]=2_24 , lowercase_ : str=32 , lowercase_ : int="quick_gelu" , lowercase_ : Any=1E-5 , lowercase_ : str=0.0 , lowercase_ : Any=0.02 , lowercase_ : Optional[Any]=1.0 , **lowercase_ : List[str] , ) -> List[Any]:
super().__init__(**lowercase_ )
lowercase__ : int = hidden_size
lowercase__ : Any = intermediate_size
lowercase__ : Any = projection_dim
lowercase__ : Any = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Optional[int] = num_channels
lowercase__ : Tuple = patch_size
lowercase__ : Dict = image_size
lowercase__ : Dict = initializer_range
lowercase__ : str = initializer_factor
lowercase__ : Dict = attention_dropout
lowercase__ : List[Any] = layer_norm_eps
lowercase__ : str = hidden_act
@classmethod
def __UpperCamelCase ( cls : Tuple , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Optional[int] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
lowercase__ , lowercase__ : Union[str, Any] = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
lowercase__ : Any = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowercase_ , **lowercase_ )
class snake_case_ ( __A ):
__A : Optional[int] = "altclip"
__A : Tuple = True
def __init__( self : Tuple , lowercase_ : str=None , lowercase_ : List[str]=None , lowercase_ : List[Any]=7_68 , lowercase_ : Union[str, Any]=2.65_92 , **lowercase_ : Optional[Any] ) -> Optional[int]:
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
lowercase__ : Any = kwargs.pop("text_config_dict" , lowercase_ )
lowercase__ : str = kwargs.pop("vision_config_dict" , lowercase_ )
super().__init__(**lowercase_ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
lowercase__ : Optional[int] = {}
# This is the complete result when using `text_config_dict`.
lowercase__ : int = AltCLIPTextConfig(**lowercase_ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
lowercase__ : Tuple = (
F'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
F'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
lowercase__ : str = (
F'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
F'''value `text_config["{key}"]` will be overriden.'''
)
logger.warning(lowercase_ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
lowercase__ : List[Any] = {}
# This is the complete result when using `vision_config_dict`.
lowercase__ : Optional[Any] = AltCLIPVisionConfig(**lowercase_ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
lowercase__ : Optional[Any] = {
str(lowercase_ ): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
lowercase__ : Union[str, Any] = (
F'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
F'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
lowercase__ : Optional[Any] = (
F'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
F'''The value `vision_config["{key}"]` will be overriden.'''
)
logger.warning(lowercase_ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
lowercase__ : int = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
lowercase__ : Tuple = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
lowercase__ : Dict = AltCLIPTextConfig(**lowercase_ )
lowercase__ : Any = AltCLIPVisionConfig(**lowercase_ )
lowercase__ : Dict = projection_dim
lowercase__ : Tuple = logit_scale_init_value
lowercase__ : Optional[Any] = 1.0
@classmethod
def __UpperCamelCase ( cls : List[str] , lowercase_ : AltCLIPTextConfig , lowercase_ : AltCLIPVisionConfig , **lowercase_ : Any ) -> Dict:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
lowercase__ : str = copy.deepcopy(self.__dict__ )
lowercase__ : int = self.text_config.to_dict()
lowercase__ : int = self.vision_config.to_dict()
lowercase__ : Dict = self.__class__.model_type
return output
| 333 | def lowercase_ ( _lowerCamelCase : int = 10 , _lowerCamelCase : int = 1000 , _lowerCamelCase : bool = True):
assert (
isinstance(_lowerCamelCase , _lowerCamelCase)
and isinstance(_lowerCamelCase , _lowerCamelCase)
and isinstance(_lowerCamelCase , _lowerCamelCase)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
return min_val if option else max_val
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
return int((number_a + number_a) / 2)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int):
assert (
isinstance(_lowerCamelCase , _lowerCamelCase) and isinstance(_lowerCamelCase , _lowerCamelCase) and isinstance(_lowerCamelCase , _lowerCamelCase)
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)")
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value")
def answer(_lowerCamelCase : int) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started...")
lowercase__ : Optional[int] = lower
lowercase__ : List[Any] = higher
lowercase__ : Dict = []
while True:
lowercase__ : Any = get_avg(_lowerCamelCase , _lowerCamelCase)
last_numbers.append(_lowerCamelCase)
if answer(_lowerCamelCase) == "low":
lowercase__ : List[str] = number
elif answer(_lowerCamelCase) == "high":
lowercase__ : Optional[int] = number
else:
break
print(f'''guess the number : {last_numbers[-1]}''')
print(f'''details : {last_numbers!s}''')
def lowercase_ ( ):
lowercase__ : Tuple = int(input("Enter lower value : ").strip())
lowercase__ : Optional[int] = int(input("Enter high value : ").strip())
lowercase__ : Optional[Any] = int(input("Enter value to guess : ").strip())
guess_the_number(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
if __name__ == "__main__":
main()
| 333 | 1 |
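The guessing loop in the sample above is binary search: every midpoint guess halves the [lower, higher] interval, so roughly log2(higher - lower) guesses suffice. The same bisection stripped of its I/O scaffolding (function name is a stand-in):
def bisect_guess(lower: int, higher: int, to_guess: int) -> list:
    guesses = []
    while True:
        number = (lower + higher) // 2  # integer midpoint; matches get_avg above for non-negative bounds
        guesses.append(number)
        if number > to_guess:
            higher = number
        elif number < to_guess:
            lower = number
        else:
            return guesses

print(bisect_guess(0, 1000, 355))  # [500, 250, 375, 312, 343, 359, 351, 355]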
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
UpperCamelCase = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
UpperCamelCase = '''▁'''
# Segments (not really needed)
UpperCamelCase = 0
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 3
UpperCamelCase = 4
class snake_case_ ( __A ):
__A : str = VOCAB_FILES_NAMES
__A : Any = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : List[str] = "left"
__A : Dict = XLNetTokenizer
def __init__( self : Optional[Any] , lowercase_ : Any=None , lowercase_ : Optional[int]=None , lowercase_ : Optional[Any]=False , lowercase_ : str=True , lowercase_ : Dict=False , lowercase_ : List[Any]="<s>" , lowercase_ : List[str]="</s>" , lowercase_ : Tuple="<unk>" , lowercase_ : str="<sep>" , lowercase_ : Dict="<pad>" , lowercase_ : str="<cls>" , lowercase_ : Union[str, Any]="<mask>" , lowercase_ : Dict=["<eop>", "<eod>"] , **lowercase_ : Optional[int] , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ : Optional[Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
vocab_file=lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
lowercase__ : List[str] = 3
lowercase__ : int = do_lower_case
lowercase__ : Union[str, Any] = remove_space
lowercase__ : Tuple = keep_accents
lowercase__ : Union[str, Any] = vocab_file
lowercase__ : Dict = False if not self.vocab_file else True
def __UpperCamelCase ( self : str , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
lowercase__ : Any = [self.sep_token_id]
lowercase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __UpperCamelCase ( self : Tuple , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
lowercase__ : Dict = [self.sep_token_id]
lowercase__ : Optional[int] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ : str = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
| 333 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCamelCase = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ) -> List[str]:
lowercase__ : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowercase__ : List[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowercase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ )
lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowercase_ , "w" , newline="\n" ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , "r" ) as f:
self.assertTrue(f.read() , lowercase_ )
def __UpperCamelCase ( self : str ) -> Optional[int]:
lowercase__ : Optional[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> str:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , )
# Copy consistency with a really long name
lowercase__ : Optional[int] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
| 333 | 1 |
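The test above exercises a copy-consistency checker: code tagged with a "# Copied from ..." comment must stay identical to its source, optionally after applying "Old->New" renames. A rough sketch of the core comparison, hedged heavily -- the real check_copies utility is considerably more involved:
def apply_renames(code: str, marker: str) -> str:
    # Marker looks like "# Copied from m.DDPMOutput with DDPM->Test";
    # apply each "Old->New" pair listed after "with".
    if " with " in marker:
        for pair in marker.split(" with ")[1].split(","):
            old, new = pair.strip().split("->")
            code = code.replace(old, new)
    return code

def is_copy_consistent(reference: str, copied: str, marker: str) -> bool:
    return apply_renames(reference, marker).strip() == copied.strip()

ref = "class DDPMOutput:\n    pass\n"
copy_ = "class TestOutput:\n    pass\n"
print(is_copy_consistent(ref, copy_, "# Copied from m.DDPMOutput with DDPM->Test"))  # True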
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
UpperCamelCase = '''bart'''
UpperCamelCase = True
@st.cache(allow_output_mutation=_lowerCamelCase)
def lowercase_ ( ):
if LOAD_DENSE_INDEX:
lowercase__ : int = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
lowercase__ : List[str] = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
lowercase__ : Optional[int] = qar_model.eval()
else:
lowercase__ , lowercase__ : Any = (None, None)
if MODEL_TYPE == "bart":
lowercase__ : str = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
lowercase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
lowercase__ : Union[str, Any] = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
sas_model.load_state_dict(save_dict["model"])
lowercase__ : Union[str, Any] = sas_model.eval()
else:
lowercase__ , lowercase__ : Union[str, Any] = make_qa_sas_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0")
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase)
def lowercase_ ( ):
if LOAD_DENSE_INDEX:
lowercase__ : Any = faiss.StandardGpuResources()
lowercase__ : int = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0")["train"]
lowercase__ : int = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , )
lowercase__ : List[Any] = faiss.IndexFlatIP(128)
lowercase__ : str = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase)
wikiaab_gpu_index_flat.add(_lowerCamelCase) # TODO fix for larger GPU
else:
lowercase__ , lowercase__ : str = (None, None)
lowercase__ : int = Elasticsearch([{"host": "localhost", "port": "9200"}])
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase)
def lowercase_ ( ):
lowercase__ : Any = datasets.load_dataset("eli5" , name="LFQA_reddit")
lowercase__ : Optional[int] = elia["train_eli5"]
lowercase__ : Optional[int] = np.memmap(
"eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128))
lowercase__ : Optional[Any] = faiss.IndexFlatIP(128)
eli5_train_q_index.add(_lowerCamelCase)
return (elia_train, eli5_train_q_index)
UpperCamelCase , UpperCamelCase , UpperCamelCase = load_indexes()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = load_models()
UpperCamelCase , UpperCamelCase = load_train_data()
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[Any]=10):
lowercase__ : Dict = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase)
lowercase__ , lowercase__ : Optional[Any] = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase)
lowercase__ : str = [elia_train[int(_lowerCamelCase)] for i in I[0]]
return nn_examples
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]="wiki40b" , _lowerCamelCase : int="dense" , _lowerCamelCase : Optional[Any]=10):
if source == "none":
lowercase__ , lowercase__ : str = (" <P> ".join(["" for _ in range(11)]).strip(), [])
else:
if method == "dense":
lowercase__ , lowercase__ : Tuple = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
lowercase__ , lowercase__ : Dict = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="english_wiki40b_snippets_100w" , n_results=_lowerCamelCase , )
lowercase__ : Optional[Any] = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
lowercase__ : int = "question: {} context: {}".format(_lowerCamelCase , _lowerCamelCase)
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase: None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase: None),
})
def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int]=64 , _lowerCamelCase : Optional[int]=256 , _lowerCamelCase : Dict=False , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : List[str]=0.95 , _lowerCamelCase : int=0.8):
with torch.no_grad():
lowercase__ : int = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="cuda:0" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
UpperCamelCase = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
UpperCamelCase = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
UpperCamelCase = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
UpperCamelCase = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
UpperCamelCase = st.sidebar.checkbox('''Demo options''')
if demo_options:
UpperCamelCase = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
UpperCamelCase = action_list.index(action_st)
UpperCamelCase = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
UpperCamelCase = show_type == '''Show full text of passages'''
else:
UpperCamelCase = 3
UpperCamelCase = True
UpperCamelCase = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
UpperCamelCase = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
UpperCamelCase = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
UpperCamelCase = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
UpperCamelCase = '''wiki40b'''
UpperCamelCase = '''dense'''
UpperCamelCase = '''beam'''
UpperCamelCase = 2
UpperCamelCase = 64
UpperCamelCase = 256
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = st.sidebar.checkbox('''Generation options''')
if generate_options:
UpperCamelCase = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
UpperCamelCase = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
UpperCamelCase = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
UpperCamelCase = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
UpperCamelCase = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
UpperCamelCase = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
UpperCamelCase = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
UpperCamelCase = None
# start main text
UpperCamelCase = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
UpperCamelCase = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
UpperCamelCase = st.text_input('''Enter your question here:''', '''''')
else:
UpperCamelCase = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
UpperCamelCase , UpperCamelCase = make_support(question, source=wiki_source, method='''dense''', n_results=10)
UpperCamelCase , UpperCamelCase = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
UpperCamelCase = support_list[:10]
UpperCamelCase = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
UpperCamelCase , UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
UpperCamelCase , UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
UpperCamelCase = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
UpperCamelCase = res[1].strip()
if sec_titles == "":
UpperCamelCase = '''[{}]({})'''.format(res[0], wiki_url)
else:
UpperCamelCase = sec_titles.split(''' & ''')
UpperCamelCase = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
UpperCamelCase = find_nearest_training(question)
UpperCamelCase = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
UpperCamelCase = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
UpperCamelCase = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 333 | from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True):
model.train()
lowercase__ : Tuple = model(_lowerCamelCase)
lowercase__ : Union[str, Any] = F.mse_loss(_lowerCamelCase , target.to(output.device))
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=False):
set_seed(42)
lowercase__ : Dict = RegressionModel()
lowercase__ : int = deepcopy(_lowerCamelCase)
lowercase__ : str = RegressionDataset(length=80)
lowercase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=16)
model.to(accelerator.device)
if sched:
lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=1E-3)
lowercase__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3)
lowercase__ : Optional[int] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
lowercase__ : Union[str, Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
    # Prepare the DDP copies (plus optimizer/scheduler when requested)
if sched:
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
lowercase__ , lowercase__ : int = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowercase_ ( _lowerCamelCase : Tuple):
    # Test that on a single CPU or GPU the `no_sync` context manager is a no-op
lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
        # Perform the ground-truth step on the unwrapped (non-DDP) model
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Any):
# Test on distributed setup that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device)
        # Perform the ground-truth step on the unwrapped (non-DDP) model
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Union[str, Any]=False):
lowercase__ : int = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : str = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
        # Perform the ground-truth step on the unwrapped (non-DDP) model
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
        # Grads should only be in sync every second iteration and on the final batch
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))]
GradientState._reset_state()
def lowercase_ ( _lowerCamelCase : List[str]=False , _lowerCamelCase : int=False):
lowercase__ : Dict = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : Any = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device)
        # Perform the ground-truth step on the unwrapped (non-DDP) model
model.train()
ddp_model.train()
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
opt.step()
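        # Step the baseline scheduler; without split batches it must step once per
        # process to stay aligned with the scheduler prepared by Accelerate.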
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
GradientState._reset_state()
def lowercase_ ( ):
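    # Verify that `GradientState` tracks the active dataloader correctly, even when a
    # second dataloader is iterated inside the first one's loop.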
lowercase__ : List[str] = Accelerator()
lowercase__ : List[Any] = RegressionDataset(length=80)
lowercase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ : int = RegressionDataset(length=96)
lowercase__ : List[str] = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ , lowercase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if iteration < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if batch_num < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowercase_ ( ):
lowercase__ : str = Accelerator()
lowercase__ : Dict = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**")
test_noop_sync(_lowerCamelCase)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**")
test_distributed_sync(_lowerCamelCase)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 333 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
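# Lazily expose the Graphormer configuration and (when torch is available) the model
# classes; the heavy imports only run under TYPE_CHECKING or via `_LazyModule`.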
UpperCamelCase = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 333 | import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : str):
lowercase__ : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase)
lowercase__ : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase)
lowercase__ : List[str] = checkpoints.load_tax_checkpoint(_lowerCamelCase)
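    # Gated-MLP checkpoints (e.g. T5 v1.1 / LongT5) split the MLP input projection
    # into wi_0/wi_1, so detect that layout before copying weights.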
lowercase__ : Dict = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
lowercase__ : str = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
lowercase__ : Dict = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ : List[str] = tax_mlp_wi_a
lowercase__ : List[Any] = tax_mlp_wi_a
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
lowercase__ : Tuple = txa_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
lowercase__ : List[Any] = txa_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
lowercase__ : Optional[Any] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase)
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 333 | 1 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(__A )
class snake_case_ ( __A ):
def __init__( self : Any , *lowercase_ : Optional[int] , **lowercase_ : Any ) -> str:
super().__init__(*lowercase_ , **lowercase_ )
requires_backends(self , "vision" )
self.check_model_type(lowercase_ )
def __call__( self : Tuple , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[int] ) -> Any:
return super().__call__(lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : int , **lowercase_ : str ) -> Union[str, Any]:
return {}, {}, {}
def __UpperCamelCase ( self : List[str] , lowercase_ : Any ) -> Dict:
lowercase__ : Any = load_image(lowercase_ )
lowercase__ : Tuple = image.size
lowercase__ : Optional[Any] = self.image_processor(images=lowercase_ , return_tensors=self.framework )
return model_inputs
def __UpperCamelCase ( self : Dict , lowercase_ : str ) -> Dict:
lowercase__ : Union[str, Any] = self.model(**lowercase_ )
return model_outputs
def __UpperCamelCase ( self : str , lowercase_ : str ) -> Dict:
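        # Upsample the predicted depth map back to the input image size, then rescale
        # it to 0-255 and wrap it in a PIL image alongside the raw depth tensor.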
lowercase__ : Union[str, Any] = model_outputs.predicted_depth
lowercase__ : Any = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=lowercase_ )
lowercase__ : Optional[Any] = prediction.squeeze().cpu().numpy()
lowercase__ : Optional[Any] = (output * 2_55 / np.max(lowercase_ )).astype("uint8" )
lowercase__ : int = Image.fromarray(lowercase_ )
lowercase__ : Dict = {}
lowercase__ : Tuple = predicted_depth
lowercase__ : Dict = depth
return output_dict
| 333 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
__A : Optional[int] = "rwkv"
__A : List[str] = {"max_position_embeddings": "context_length"}
def __init__( self : Dict , lowercase_ : List[Any]=5_02_77 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=40_96 , lowercase_ : int=32 , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Any=1E-5 , lowercase_ : Optional[Any]=0 , lowercase_ : Any=0 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=False , lowercase_ : int=True , **lowercase_ : List[str] , ) -> int:
lowercase__ : List[str] = vocab_size
lowercase__ : str = context_length
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase__ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowercase__ : List[Any] = layer_norm_epsilon
lowercase__ : str = rescale_every
lowercase__ : Optional[int] = use_cache
lowercase__ : int = bos_token_id
lowercase__ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
| 333 | 1 |
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : List[str] = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
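# Project Euler 65: `solution` builds the numerators of the convergents of e, whose
# continued-fraction terms are [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] (hence the
# `2 * i // 3 if i % 3 == 0 else 1` term), and sums the digits of the `max_n`-th numerator.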
def lowercase_ ( _lowerCamelCase : int = 100):
lowercase__ : Any = 1
lowercase__ : str = 2
for i in range(2 , max_n + 1):
lowercase__ : List[str] = pre_numerator
lowercase__ : Optional[int] = 2 * i // 3 if i % 3 == 0 else 1
lowercase__ : Any = cur_numerator
lowercase__ : List[str] = e_cont * pre_numerator + temp
return sum_digits(_lowerCamelCase)
if __name__ == "__main__":
print(f"{solution() = }")
| 333 | class snake_case_ :
def __init__( self : int ) -> Optional[int]:
lowercase__ : Optional[int] = 0
lowercase__ : List[str] = 0
lowercase__ : Any = {}
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]:
if vertex not in self.adjacency:
lowercase__ : List[Any] = {}
self.num_vertices += 1
def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]:
self.add_vertex(lowercase_ )
self.add_vertex(lowercase_ )
if head == tail:
return
lowercase__ : int = weight
lowercase__ : Any = weight
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
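        # Deduplicate mirrored edges, then bump equal weights so every edge weight is
        # distinct; Borůvka's cheapest-edge choice then has a unique answer per component.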
lowercase__ : List[Any] = self.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : int = edge
edges.remove((tail, head, weight) )
for i in range(len(lowercase_ ) ):
lowercase__ : Tuple = list(edges[i] )
edges.sort(key=lambda lowercase_ : e[2] )
for i in range(len(lowercase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowercase__ : int = edges[i][2] + 1
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge
lowercase__ : Union[str, Any] = weight
lowercase__ : Dict = weight
def __str__( self : str ) -> Any:
lowercase__ : str = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase__ : Optional[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Any = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : List[str] ) -> Dict:
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]:
lowercase__ : Any = Graph()
if vertices is None:
lowercase__ : str = []
if edges is None:
lowercase__ : List[Any] = []
for vertex in vertices:
g.add_vertex(lowercase_ )
for edge in edges:
g.add_edge(*lowercase_ )
return g
class snake_case_ :
def __init__( self : int ) -> List[str]:
lowercase__ : Dict = {}
lowercase__ : Tuple = {}
def __len__( self : Union[str, Any] ) -> Union[str, Any]:
return len(self.parent )
def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple:
if item in self.parent:
return self.find(lowercase_ )
lowercase__ : Union[str, Any] = item
lowercase__ : int = 0
return item
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any:
if item not in self.parent:
return self.make_set(lowercase_ )
if item != self.parent[item]:
lowercase__ : Union[str, Any] = self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]:
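        # Union by rank: attach the root of the shallower tree under the deeper one,
        # incrementing the rank when both trees are equally deep.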
lowercase__ : Dict = self.find(lowercase_ )
lowercase__ : Optional[int] = self.find(lowercase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowercase__ : Dict = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowercase__ : int = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowercase__ : Tuple = roota
return roota
return None
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]:
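        # Borůvka's algorithm: while more than one component remains, pick the cheapest
        # edge leaving each component and merge the endpoints' components via union-find.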
lowercase__ : List[Any] = graph.num_vertices
lowercase__ : Optional[Any] = Graph.UnionFind()
lowercase__ : int = []
while num_components > 1:
lowercase__ : List[Any] = {}
for vertex in graph.get_vertices():
lowercase__ : Any = -1
lowercase__ : List[str] = graph.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : List[str] = edge
lowercase__ : List[str] = union_find.find(lowercase_ )
lowercase__ : Union[str, Any] = union_find.find(lowercase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : int = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : Dict = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex]
if union_find.find(lowercase_ ) != union_find.find(lowercase_ ):
union_find.union(lowercase_ , lowercase_ )
mst_edges.append(cheap_edge[vertex] )
lowercase__ : Optional[Any] = num_components - 1
lowercase__ : List[Any] = Graph.build(edges=lowercase_ )
return mst
| 333 | 1 |