from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
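# A minimal usage sketch (illustrative, not from the original file): build the
# processor directly and push one synthetic PIL image through the full
# resize -> center-crop -> rescale -> normalize pipeline. The synthetic image
# and the "pt" tensor type are assumptions for demonstration only.
if __name__ == "__main__":
    from PIL import Image

    processor = LevitImageProcessor()
    image = Image.new("RGB", (640, 480))  # stand-in for a real photo
    inputs = processor(image, return_tensors="pt")
    print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])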
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch.
    """
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to a convolution layer.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
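# Quick numeric check of the padding arithmetic above (added for illustration,
# not part of the original file): for a 7x7 feature map, 3x3 kernel, stride 2,
# we get 7 % 2 == 1, so pad_along = max(3 - 1, 0) = 2, split as
# (top, bottom) = (1, 1); the output size becomes ceil(7 / 2) = 4, which is
# exactly what TensorFlow "SAME" padding produces.
def _demo_tf_padding():
    conv = nn.Conv2d(1, 1, kernel_size=3, stride=2)
    features = torch.zeros(1, 1, 7, 7)
    padded = apply_tf_padding(features, conv)
    print(padded.shape)        # torch.Size([1, 1, 9, 9])
    print(conv(padded).shape)  # torch.Size([1, 1, 4, 4])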
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: bool = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
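# Illustrative smoke test (not part of the original file): instantiate the
# backbone from a fresh, randomly initialized MobileNetV1Config and check the
# documented output shapes; the 224x224 input size is an assumption matching
# the docstring constants above.
def _demo_mobilenet_v1_backbone():
    config = MobileNetV1Config()
    model = MobileNetV1Model(config)
    pixel_values = torch.randn(1, config.num_channels, 224, 224)
    outputs = model(pixel_values)
    print(outputs.last_hidden_state.shape)  # torch.Size([1, 1024, 7, 7])
    print(outputs.pooler_output.shape)      # torch.Size([1, 1024])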
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
def is_automorphic_number(number: int) -> bool:
    """
    Checks whether ``number`` is an automorphic number, i.e. whether its square
    ends in the number itself.

    >>> is_automorphic_number(5)
    True
    >>> is_automorphic_number(7)
    False
    >>> is_automorphic_number(76)
    True
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
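    # A few values checked by hand (added for illustration): 5^2 = 25,
    # 6^2 = 36, 76^2 = 5776 and 625^2 = 390625 each end in the original
    # number, while 7^2 = 49 does not end in 7.
    for n in (5, 6, 76, 625, 7):
        print(n, is_automorphic_number(n))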
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """
    Returns the prime numbers below max_number, via a sieve of Eratosthenes.

    >>> calculate_prime_numbers(10)
    [2, 3, 5, 7]
    """
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """
    Returns the number of composite integers below max_number with exactly two
    (not necessarily distinct) prime factors.

    >>> solution(30)
    10
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
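    # Hand-checked worked example (added for illustration): the semiprimes
    # below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26 -- ten of them.
    print(f"{solution(30) = }")  # solution(30) = 10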
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Implements the logistic sigmoid, 1 / (1 + e^-x), element-wise.

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """
    Implements the sigmoid linear unit (SiLU, also known as swish):
    silu(x) = x * sigmoid(x).

    >>> sigmoid_linear_unit(np.array([0.0]))
    array([0.])
    """
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
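    # Spot checks (added for illustration): sigmoid(0) = 0.5 so SiLU(0) = 0;
    # large positive inputs pass through almost unchanged, large negative
    # inputs are squashed towards zero.
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))              # ~[0.269 0.5   0.731]
    print(sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])))  # ~[-0.269 0.    0.731]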
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # An iterable dataset of random (unknown-in-advance) length, to mimic streaming data.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and also not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and also not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
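# Standalone illustration (not one of the original tests) of the sharding the
# assertions above encode: two processes, batch size 3, a dataset of 8 samples.
# The tail batch wraps around to the start of the dataset so both shards keep
# receiving complete batches; the printed values follow the pattern asserted
# in the tests above.
if __name__ == "__main__":
    sampler = BatchSampler(range(8), batch_size=3, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    print(list(shards[0]))  # [[0, 1, 2], [6, 7, 0]]
    print(list(shards[1]))  # [[3, 4, 5], [1, 2, 3]]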
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test for the Mersenne number 2**p - 1,
    where p is an odd prime.

    >>> lucas_lehmer_test(p=7)
    True
    >>> lucas_lehmer_test(p=11)
    False
    """
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
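    # Worked example for p = 5 (M_5 = 31): starting from s = 4, the sequence
    # runs 4 -> 14 -> 194 % 31 = 8 -> 62 % 31 = 0, so 31 is reported prime.
    print(lucas_lehmer_test(5))  # True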
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    '''simple docstring'''
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    '''simple docstring'''
__snake_case = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__snake_case = "resnet101"
if "dc5" in model_name:
__snake_case = True
__snake_case = "panoptic" in model_name
if is_panoptic:
__snake_case = 250
else:
__snake_case = 91
__snake_case = "huggingface/label-files"
__snake_case = "coco-detection-id2label.json"
__snake_case = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='dataset' ) , 'r' ) )
__snake_case = {int(__snake_case ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
# load image processor
__snake_case = "coco_panoptic" if is_panoptic else "coco_detection"
__snake_case = ConditionalDetrImageProcessor(format=__snake_case )
# prepare image
__snake_case = prepare_img()
__snake_case = image_processor(images=__snake_case , return_tensors='pt' )
__snake_case = encoding["pixel_values"]
logger.info(f"Converting model {model_name}..." )
# load original model from torch hub
__snake_case = torch.hub.load('DeppMeng/ConditionalDETR' , __snake_case , pretrained=__snake_case ).eval()
__snake_case = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__snake_case = "conditional_detr." + src
rename_key(__snake_case , __snake_case , __snake_case )
__snake_case = rename_backbone_keys(__snake_case )
# query, key and value matrices need special treatment
read_in_q_k_v(__snake_case , is_panoptic=__snake_case )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__snake_case = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
__snake_case = state_dict.pop(__snake_case )
__snake_case = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__snake_case = state_dict.pop(__snake_case )
__snake_case = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
__snake_case = state_dict.pop(__snake_case )
__snake_case = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
__snake_case = state_dict.pop(__snake_case )
__snake_case = val
# finally, create HuggingFace model and load state dict
__snake_case = ConditionalDetrForSegmentation(__snake_case ) if is_panoptic else ConditionalDetrForObjectDetection(__snake_case )
model.load_state_dict(__snake_case )
model.eval()
model.push_to_hub(repo_id=__snake_case , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
__snake_case = conditional_detr(__snake_case )
__snake_case = model(__snake_case )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 )
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
model.save_pretrained(__snake_case )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
UpperCAmelCase__ : Tuple = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
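# A minimal post-conversion inference sketch. It loads the officially hosted
# "microsoft/conditional-detr-resnet-50" checkpoint; a repo produced by the
# push_to_hub call above (e.g. "DepuMeng/conditional_detr_resnet50") would be
# used the same way. The 0.5 threshold and the COCO image URL are illustrative.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConditionalDetrForObjectDetection


def run_detection_demo() -> None:
    processor = AutoImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50')
    model = ConditionalDetrForObjectDetection.from_pretrained('microsoft/conditional-detr-resnet-50')
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors='pt')
    with torch.no_grad():
        outputs = model(**inputs)
    # rescale the normalized boxes back to pixel coordinates and keep confident hits
    target_sizes = torch.tensor([image.size[::-1]])
    results = processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]
    for score, label, box in zip(results['scores'], results['labels'], results['boxes']):
        print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())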
| 713 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : Optional[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
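# The module above only materializes its heavy submodules on first attribute
# access. A standalone sketch of the same lazy-import idea using PEP 562 module
# __getattr__ instead of transformers' _LazyModule helper; the mapping below is
# a toy example, not the CLIP import structure.
import importlib

_LAZY_IMPORT_STRUCTURE = {'json': ['loads', 'dumps']}  # backing module -> exported names
_ATTR_TO_MODULE = {attr: mod for mod, attrs in _LAZY_IMPORT_STRUCTURE.items() for attr in attrs}


def __getattr__(name):
    # invoked only when `name` is not already found in the module namespace
    if name in _ATTR_TO_MODULE:
        module = importlib.import_module(_ATTR_TO_MODULE[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")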
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ : int = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = ["PoolFormerFeatureExtractor"]
UpperCAmelCase__ : List[str] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure)
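# A minimal classification sketch for the classes exported above, assuming the
# public "sail/poolformer_s12" checkpoint; any PoolFormer checkpoint works.
import torch
from PIL import Image
from transformers import PoolFormerForImageClassification, PoolFormerImageProcessor


def classify_image(path: str) -> str:
    processor = PoolFormerImageProcessor.from_pretrained('sail/poolformer_s12')
    model = PoolFormerForImageClassification.from_pretrained('sail/poolformer_s12')
    inputs = processor(images=Image.open(path).convert('RGB'), return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]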
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
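# A two-stage usage sketch for the pipelines imported above: the prior maps a
# prompt to CLIP image embeddings, the decoder renders them into pixels. The
# "kandinsky-community" checkpoint ids are assumptions based on the public Hub
# repos; swap in whatever weights you actually have.
import torch
from diffusers import KandinskyPipeline, KandinskyPriorPipeline


def generate(prompt: str):
    prior = KandinskyPriorPipeline.from_pretrained(
        'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.float16
    )
    decoder = KandinskyPipeline.from_pretrained('kandinsky-community/kandinsky-2-1', torch_dtype=torch.float16)
    image_embeds, negative_image_embeds = prior(prompt).to_tuple()
    return decoder(prompt, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds).images[0]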
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : int = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = ["ConvNextFeatureExtractor"]
UpperCAmelCase__ : Union[str, Any] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
UpperCAmelCase__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
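# A feature-extraction sketch for ConvNextBackbone, exported above. The
# "facebook/convnext-tiny-224" checkpoint and the chosen stages are
# illustrative; out_features selects which stage maps are returned.
import torch
from transformers import ConvNextBackbone


def extract_feature_shapes(pixel_values: torch.Tensor) -> list:
    backbone = ConvNextBackbone.from_pretrained(
        'facebook/convnext-tiny-224', out_features=['stage2', 'stage4']
    )
    with torch.no_grad():
        outputs = backbone(pixel_values)
    return [tuple(f.shape) for f in outputs.feature_maps]

# e.g. extract_feature_shapes(torch.randn(1, 3, 224, 224))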
| 715 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
    '''simple docstring'''
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''
    return config
def rename_key(name, config):
    '''simple docstring'''
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
    if "layers" in name:
        name = name.replace('layers' , 'encoder.stages' )
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks' , 'layers' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "q_bias" in name:
        name = name.replace('q_bias' , 'query.bias' )
    if "k_bias" in name:
        name = name.replace('k_bias' , 'key.bias' )
    if "v_bias" in name:
        name = name.replace('v_bias' , 'value.bias' )
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'patch_embed.projection' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "conv_first" in name:
        name = name.replace('conv_first' , 'first_convolution' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last' , 'final_convolution' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
            if "upsample.0" in name:
                name = name.replace('upsample.0' , 'upsample.convolution_0' )
            if "upsample.2" in name:
                name = name.replace('upsample.2' , 'upsample.convolution_1' )
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
            name = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
        else:
            pass
    else:
        name = 'swin2sr.' + name
    return name
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split('.' )
            stage_num = int(key_split[1] )
            block_num = int(key_split[4] )
            dim = config.embed_dim
            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , config )] = val
    return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    '''simple docstring'''
    config = get_config(checkpoint_url )
    model = SwinaSRForImageSuperResolution(config )
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    if len(missing_keys ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict" )
    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    pixel_values = transforms(image ).unsqueeze(0 )
    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1 )
    outputs = model(pixel_values )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512] )
        expected_slice = torch.tensor(
            [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024] )
        expected_slice = torch.tensor(
            [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , expected_slice , atol=1e-3 )
    print('Looks ok!' )
    url_to_name = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving image processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}" )
        processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
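# A super-resolution inference sketch with the converted weights; the
# "caidas/swin2SR-classical-sr-x2-64" id matches the push_to_hub target above
# but is still an assumption about what was uploaded. Swin2SR* are the real
# transformers names of the classes this script refers to.
import numpy as np
import torch
from PIL import Image
from transformers import SwinaSRForImageSuperResolution as Swin2SRForImageSuperResolution  # alias if needed
from transformers import SwinaSRImageProcessor as Swin2SRImageProcessor


def upscale(path: str) -> Image.Image:
    processor = Swin2SRImageProcessor()
    model = Swin2SRForImageSuperResolution.from_pretrained('caidas/swin2SR-classical-sr-x2-64')
    inputs = processor(Image.open(path).convert('RGB'), return_tensors='pt')
    with torch.no_grad():
        reconstruction = model(**inputs).reconstruction
    array = reconstruction.squeeze(0).clamp(0, 1).permute(1, 2, 0).numpy()
    return Image.fromarray((array * 255.0).round().astype(np.uint8))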
| 676 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))
    def get_scheduler_config( self , **kwargs) -> dict:
        config = {
            'num_train_timesteps': 1_0_0_0,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config
    def full_loop( self , **kwargs):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**kwargs)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps , eta = 1_0, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample , t)
            sample = scheduler.step(residual , t , sample , eta).prev_sample
        return sample
def _a ( self) -> str:
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE)
def _a ( self) -> Optional[Any]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__SCREAMING_SNAKE_CASE)
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config(steps_offset=1)
__snake_case = scheduler_class(**__SCREAMING_SNAKE_CASE)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1]))
def _a ( self) -> int:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE)
def _a ( self) -> Tuple:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE)
def _a ( self) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE)
def _a ( self) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE)
def _a ( self) -> Optional[Any]:
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__SCREAMING_SNAKE_CASE)
def _a ( self) -> Union[str, Any]:
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__SCREAMING_SNAKE_CASE)
def _a ( self) -> str:
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def _a ( self) -> List[Any]:
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE)
def _a ( self) -> Tuple:
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0]):
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE)
def _a ( self) -> Optional[int]:
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE)
def _a ( self) -> Any:
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**__SCREAMING_SNAKE_CASE)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0) - 0.1_4771)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0) - 0.3_2460)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6) - 0.0_0979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8) - 0.02)) < 1e-5
def _a ( self) -> Union[str, Any]:
__snake_case = self.scheduler_classes[0]
__snake_case = self.get_scheduler_config()
__snake_case = scheduler_class(**__SCREAMING_SNAKE_CASE)
__snake_case , __snake_case = 1_0, 0.0
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE)
__snake_case = self.dummy_model()
__snake_case = self.dummy_sample_deter
__snake_case = self.dummy_sample_deter + 0.1
__snake_case = self.dummy_sample_deter - 0.1
__snake_case = samplea.shape[0]
__snake_case = torch.stack([samplea, samplea, samplea] , dim=0)
__snake_case = torch.arange(__SCREAMING_SNAKE_CASE)[0:3, None].repeat(1 , __SCREAMING_SNAKE_CASE)
__snake_case = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
__snake_case = scheduler.batch_step_no_noise(__SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , __SCREAMING_SNAKE_CASE)
__snake_case = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__snake_case = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 1_1_4_7.7_9_0_4) < 1e-2
assert abs(result_mean.item() - 0.4982) < 1e-3
def _a ( self) -> Optional[Any]:
__snake_case = self.full_loop()
__snake_case = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__snake_case = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 1_7_2.0_0_6_7) < 1e-2
assert abs(result_mean.item() - 0.22_3967) < 1e-3
def _a ( self) -> Optional[Any]:
__snake_case = self.full_loop(prediction_type='v_prediction')
__snake_case = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__snake_case = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 52.5302) < 1e-2
assert abs(result_mean.item() - 0.0684) < 1e-3
def _a ( self) -> Dict:
__snake_case = self.full_loop(set_alpha_to_one=__SCREAMING_SNAKE_CASE , beta_start=0.01)
__snake_case = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__snake_case = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 1_4_9.8_2_9_5) < 1e-2
assert abs(result_mean.item() - 0.1951) < 1e-3
def _a ( self) -> Optional[int]:
__snake_case = self.full_loop(set_alpha_to_one=__SCREAMING_SNAKE_CASE , beta_start=0.01)
__snake_case = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE))
__snake_case = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE))
assert abs(result_sum.item() - 1_4_9.0_7_8_4) < 1e-2
assert abs(result_mean.item() - 0.1941) < 1e-3
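# The variance assertions above follow the DDIM posterior formula
# sigma_t^2 = (1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev) on the cumulative
# alphas. A standalone check with the test's linear beta schedule; exact
# agreement with the scheduler internals is an assumption, not a guarantee.
import torch


def ddim_variance(t: int, prev_t: int, num_train_timesteps: int = 1000,
                  beta_start: float = 0.0001, beta_end: float = 0.02) -> torch.Tensor:
    betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    return (1 - alpha_prod_prev) / (1 - alpha_prod_t) * (1 - alpha_prod_t / alpha_prod_prev)

# ddim_variance(420, 400) should land near the 0.14771 value asserted above.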
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
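# TrOCRForCausalLM is the text decoder of an encoder-decoder OCR model, so in
# practice it is loaded through VisionEncoderDecoderModel. A recognition
# sketch assuming the public "microsoft/trocr-base-handwritten" checkpoint:
import torch
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel


def read_text(path: str) -> str:
    processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')
    model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten')
    pixel_values = processor(images=Image.open(path).convert('RGB'), return_tensors='pt').pixel_values
    with torch.no_grad():
        generated_ids = model.generate(pixel_values)
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]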
| 676 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __lowercase ( UpperCamelCase_ ):
__UpperCAmelCase = '''vit_msn'''
def __init__( self , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1e-06 , lowercase_=2_2_4 , lowercase_=1_6 , lowercase_=3 , lowercase_=True , **lowercase_ , ) -> Any:
super().__init__(**__a)
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = qkv_bias
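# Instantiating the config above and a randomly initialized model from it;
# ViTMSNModel is the class this config maps to in transformers, and the
# reduced sizes are arbitrary illustration values.
from transformers import ViTMSNConfig, ViTMSNModel


def build_small_vit_msn() -> int:
    config = ViTMSNConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6, intermediate_size=1536)
    model = ViTMSNModel(config)
    return sum(p.numel() for p in model.parameters())  # parameter count of the random model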
| 717 |
from __future__ import annotations
class Node:
    def __init__( self , data: int) -> None:
        self.data = data
        self.left = None
        self.right = None
def display(tree: Node | None ) -> None:  # In Order traversal of the tree
    '''simple docstring'''
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree(tree: Node | None ) -> int:
    '''simple docstring'''
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree(tree: Node ) -> bool:
    '''simple docstring'''
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    '''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('Tree is: ' )
    display(tree )
if __name__ == "__main__":
main()
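# In a full binary tree every internal node has exactly two children, so the
# leaf count always exceeds the internal-node count by one. A small check of
# that invariant against trees built from the Node class above:
def count_leaves(tree: Node | None) -> int:
    if tree is None:
        return 0
    if tree.left is None and tree.right is None:
        return 1
    return count_leaves(tree.left) + count_leaves(tree.right)


def count_internal(tree: Node | None) -> int:
    if tree is None or (tree.left is None and tree.right is None):
        return 0
    return 1 + count_internal(tree.left) + count_internal(tree.right)

# for any tree t with is_full_binary_tree(t): count_leaves(t) == count_internal(t) + 1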
| 676 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset , expected_features ) -> None:
    '''simple docstring'''
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ) -> Dict:
'''simple docstring'''
__snake_case = tmp_path / 'cache'
__snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__snake_case = ParquetDatasetReader(_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase ).read()
_check_parquet_dataset(_lowercase , _lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[Any] ) -> str:
'''simple docstring'''
__snake_case = tmp_path / 'cache'
__snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__snake_case = features.copy() if features else default_expected_features
__snake_case = (
Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case = ParquetDatasetReader(_lowercase , features=_lowercase , cache_dir=_lowercase ).read()
_check_parquet_dataset(_lowercase , _lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
__snake_case = tmp_path / 'cache'
__snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__snake_case = ParquetDatasetReader(_lowercase , cache_dir=_lowercase , split=_lowercase ).read()
_check_parquet_dataset(_lowercase , _lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def A ( snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Any ) -> List[Any]:
'''simple docstring'''
if issubclass(_lowercase , _lowercase ):
__snake_case = parquet_path
elif issubclass(_lowercase , _lowercase ):
__snake_case = [parquet_path]
__snake_case = tmp_path / 'cache'
__snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__snake_case = ParquetDatasetReader(_lowercase , cache_dir=_lowercase ).read()
_check_parquet_dataset(_lowercase , _lowercase )
def _check_parquet_datasetdict(dataset_dict , expected_features , splits=("train",) ) -> None:
    '''simple docstring'''
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def A ( snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any] ) -> Any:
'''simple docstring'''
__snake_case = tmp_path / 'cache'
__snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__snake_case = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=_lowercase , keep_in_memory=_lowercase ).read()
_check_parquet_datasetdict(_lowercase , _lowercase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def A ( snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Tuple ) -> List[str]:
'''simple docstring'''
__snake_case = tmp_path / 'cache'
__snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__snake_case = features.copy() if features else default_expected_features
__snake_case = (
Features({feature: Value(_lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
__snake_case = ParquetDatasetReader({'train': parquet_path} , features=_lowercase , cache_dir=_lowercase ).read()
_check_parquet_datasetdict(_lowercase , _lowercase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def A ( snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] ) -> Any:
'''simple docstring'''
if split:
__snake_case = {split: parquet_path}
else:
__snake_case = 'train'
__snake_case = {'train': parquet_path, 'test': parquet_path}
__snake_case = tmp_path / 'cache'
__snake_case = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__snake_case = ParquetDatasetReader(_lowercase , cache_dir=_lowercase ).read()
_check_parquet_datasetdict(_lowercase , _lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A ( snake_case__ : Dict , snake_case__ : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case = ParquetDatasetWriter(_lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
__snake_case = pq.ParquetFile(tmp_path / 'foo.parquet' )
__snake_case = pf.read()
assert dataset.data.table == output_table
def A ( snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ) -> int:
'''simple docstring'''
__snake_case = str(shared_datadir / 'test_image_rgb.jpg' )
__snake_case = {'image': [image_path]}
__snake_case = Features({'image': Image()} )
__snake_case = Dataset.from_dict(_lowercase , features=_lowercase )
__snake_case = ParquetDatasetWriter(_lowercase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
__snake_case = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
__snake_case = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=_lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def A ( snake_case__ : str , snake_case__ : Optional[Any] ) -> Any:
'''simple docstring'''
assert get_writer_batch_size(_lowercase ) == expected
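# A minimal round-trip outside pytest, mirroring what the fixtures above
# exercise: write a Dataset to Parquet, then read it back. tmp_dir is any
# writable directory.
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter


def parquet_roundtrip(tmp_dir: str) -> None:
    ds = Dataset.from_dict({'col_1': ['a', 'b', 'c', 'd'], 'col_2': [0, 1, 2, 3]})
    path = f"{tmp_dir}/roundtrip.parquet"
    assert ParquetDatasetWriter(ds, path).write() > 0
    reloaded = ParquetDatasetReader(path, cache_dir=f"{tmp_dir}/cache").read()
    assert reloaded.column_names == ds.column_names and reloaded.num_rows == ds.num_rows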
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''table-transformer'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=1_0_0 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=2_5_6 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__snake_case = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(lowercase_ , lowercase_):
__snake_case = backbone_config.get('model_type')
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(lowercase_)
# set timm attributes to None
__snake_case , __snake_case , __snake_case = None, None, None
__snake_case = use_timm_backbone
__snake_case = backbone_config
__snake_case = num_channels
__snake_case = num_queries
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = init_xavier_std
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = encoder_layers
__snake_case = auxiliary_loss
__snake_case = position_embedding_type
__snake_case = backbone
__snake_case = use_pretrained_backbone
__snake_case = dilation
# Hungarian matcher
__snake_case = class_cost
__snake_case = bbox_cost
__snake_case = giou_cost
# Loss coefficients
__snake_case = mask_loss_coefficient
__snake_case = dice_loss_coefficient
__snake_case = bbox_loss_coefficient
__snake_case = giou_loss_coefficient
__snake_case = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def _a ( self) -> int:
return self.encoder_attention_heads
@property
def _a ( self) -> int:
return self.d_model
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def _a ( self) -> float:
return 1e-5
@property
def _a ( self) -> int:
return 1_2
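# Building the config with a non-default ResNet backbone, exercising the
# non-timm branch of __init__ above. Whether a ready-made config object (as
# opposed to a dict) is accepted unchanged by that branch is an assumption.
from transformers import ResNetConfig, TableTransformerConfig


def build_table_transformer_config() -> TableTransformerConfig:
    backbone_config = ResNetConfig(out_features=['stage4'])
    return TableTransformerConfig(use_timm_backbone=False, backbone_config=backbone_config, num_queries=50)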
| 676 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
__UpperCAmelCase = '''vit_msn'''
def __init__( self , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1e-06 , lowercase_=2_2_4 , lowercase_=1_6 , lowercase_=3 , lowercase_=True , **lowercase_ , ) -> str:
super().__init__(**_lowercase)
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = qkv_bias
| 719 |
from maths.prime_check import is_prime
def twin_prime(number: int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
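# Worked examples for twin_prime above: 3 and 5 are both prime, so 3 -> 5;
# 7 -> -1 because 9 = 3 * 3 is composite; 11 -> 13.
def demo_twin_prime() -> None:
    for n in (3, 7, 11):
        print(n, twin_prime(n))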
| 676 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer: Optimizer , last_epoch: int = -1 ) -> LambdaLR:
    '''simple docstring'''
    return LambdaLR(optimizer , lambda _: 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , last_epoch: int = -1 ) -> LambdaLR:
    '''simple docstring'''
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer: Optimizer , step_rules: str , last_epoch: int = -1 ) -> LambdaLR:
    '''simple docstring'''
    rules_dict = {}
    rule_list = step_rules.split(',' )
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(':' )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps: int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ) -> LambdaLR:
    '''simple docstring'''
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: float = 0.5 , last_epoch: int = -1 ) -> LambdaLR:
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: int = 1 , last_epoch: int = -1 ) -> LambdaLR:
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ) -> LambdaLR:
    '''simple docstring'''
    lr_init = optimizer.defaults['lr']
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})" )
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType] ,
    optimizer: Optimizer ,
    step_rules: Optional[str] = None ,
    num_warmup_steps: Optional[int] = None ,
    num_training_steps: Optional[int] = None ,
    num_cycles: int = 1 ,
    power: float = 1.0 ,
    last_epoch: int = -1 ,
) -> LambdaLR:
    '''simple docstring'''
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(name and optimizer , last_epoch=last_epoch ) if False else schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
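# Wiring the factory above to a real optimizer: 10% linear warmup followed by
# a linear decay to zero. Call scheduler.step() once per optimizer step. The
# learning rate and warmup ratio are illustrative choices.
import torch


def build_warmup_scheduler(model: torch.nn.Module, num_training_steps: int):
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
    scheduler = get_scheduler(
        'linear',
        optimizer,
        num_warmup_steps=num_training_steps // 10,
        num_training_steps=num_training_steps,
    )
    return optimizer, scheduler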
| 720 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id , path , revision ) -> None:
    '''simple docstring'''
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
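# Concretely, the helper percent-encodes the file name and falls back to
# "main" when no revision is given:
def demo_hf_hub_url() -> None:
    print(hf_hub_url('org-name/dataset-name', 'filename with blanks.csv', revision='v2'))
    # -> https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv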
| 676 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    '''simple docstring'''
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9_995 ):
    '''simple docstring'''
    inputs_are_torch = False
    if not isinstance(v0 , np.ndarray ):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0 ) * np.linalg.norm(v1 )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot )
        sin_theta_0 = np.sin(theta_0 )
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t )
        s0 = np.sin(theta_0 - theta_t ) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2 ).to(input_device )
    return v2
def spherical_dist_loss(x, y):
    '''simple docstring'''
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def set_requires_grad(model, value):
    '''simple docstring'''
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedStableDiffusion(DiffusionPipeline ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
vae=_a , text_encoder=_a , clip_model=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , coca_model=_a , coca_tokenizer=_a , coca_transform=_a , )
__snake_case = (
feature_extractor.size
if isinstance(feature_extractor.size , _a)
else feature_extractor.size['shortest_edge']
)
__snake_case = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std)
set_requires_grad(self.text_encoder , _a)
set_requires_grad(self.clip_model , _a)
    def enable_attention_slicing( self , slice_size = "auto") -> None:
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing( self) -> None:
        self.enable_attention_slicing(None)
def _a ( self) -> Any:
set_requires_grad(self.vae , _a)
def _a ( self) -> Any:
set_requires_grad(self.vae , _a)
def _a ( self) -> Optional[Any]:
set_requires_grad(self.unet , _a)
def _a ( self) -> int:
set_requires_grad(self.unet , _a)
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
# get the original timestep using init_timestep
__snake_case = min(int(num_inference_steps * strength) , _a)
__snake_case = max(num_inference_steps - init_timestep , 0)
__snake_case = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
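    # Worked example (illustrative): with num_inference_steps=50 and strength=0.6,
    # init_timestep = min(int(50 * 0.6), 50) = 30 and t_start = 50 - 30 = 20, so
    # the image-to-image loop runs over the last 30 of the 50 scheduled timesteps;
    # a higher strength keeps more noise steps and therefore changes the input more.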
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None) -> Any:
if not isinstance(_a , torch.Tensor):
raise ValueError(F"`image` has to be of type `torch.Tensor` but is {type(_a)}")
__snake_case = image.to(device=_a , dtype=_a)
if isinstance(_a , _a):
__snake_case = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(_a)
]
__snake_case = torch.cat(_a , dim=0)
else:
__snake_case = self.vae.encode(_a).latent_dist.sample(_a)
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__snake_case = 0.1_8215 * init_latents
__snake_case = init_latents.repeat_interleave(_a , dim=0)
__snake_case = randn_tensor(init_latents.shape , generator=_a , device=_a , dtype=_a)
# get latents
__snake_case = self.scheduler.add_noise(_a , _a , _a)
__snake_case = init_latents
return latents
def _a ( self , lowercase_) -> List[str]:
__snake_case = self.coca_transform(_a).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
__snake_case = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype))
__snake_case = self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split('<end_of_text>')[0].replace('<start_of_text>' , '').rstrip(' .,')
def _a ( self , lowercase_ , lowercase_) -> List[Any]:
__snake_case = self.feature_extractor.preprocess(_a)
__snake_case = torch.from_numpy(clip_image_input['pixel_values'][0]).unsqueeze(0).to(self.device).half()
__snake_case = self.clip_model.get_image_features(_a)
__snake_case = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a)
__snake_case = image_embeddings_clip.repeat_interleave(_a , dim=0)
return image_embeddings_clip
@torch.enable_grad()
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> int:
__snake_case = latents.detach().requires_grad_()
__snake_case = self.scheduler.scale_model_input(_a , _a)
# predict the noise residual
__snake_case = self.unet(_a , _a , encoder_hidden_states=_a).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
__snake_case = self.scheduler.alphas_cumprod[timestep]
__snake_case = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__snake_case = torch.sqrt(_a)
__snake_case = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _a):
__snake_case = self.scheduler.sigmas[index]
__snake_case = latents - sigma * noise_pred
else:
raise ValueError(F"scheduler type {type(self.scheduler)} not supported")
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__snake_case = 1 / 0.1_8215 * sample
__snake_case = self.vae.decode(_a).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
__snake_case = transforms.Resize(self.feature_extractor_size)(_a)
__snake_case = self.normalize(_a).to(latents.dtype)
__snake_case = self.clip_model.get_image_features(_a)
__snake_case = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a)
__snake_case = spherical_dist_loss(_a , _a).mean() * clip_guidance_scale
__snake_case = -torch.autograd.grad(_a , _a)[0]
if isinstance(self.scheduler , _a):
__snake_case = latents.detach() + grads * (sigma**2)
__snake_case = noise_pred_original
else:
__snake_case = noise_pred_original - torch.sqrt(_a) * grads
return noise_pred, latents
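    # The method above is classic CLIP guidance: decode a one-step estimate of x_0,
    # embed it with CLIP, and nudge the noise prediction with the gradient of the
    # spherical distance to the target embedding. A minimal sketch of the update
    # rule for epsilon-predicting schedulers, with hypothetical tensor names:
    #
    #   loss = spherical_dist_loss(pred_embed, target_embed).mean() * clip_guidance_scale
    #   grads = -torch.autograd.grad(loss, latents)[0]
    #   noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads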
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 0.6 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = 1_0_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = 0.8 , lowercase_ = 0.1 , lowercase_ = 0.1 , ) -> List[str]:
if isinstance(_a , _a) and len(_a) != batch_size:
raise ValueError(F"You have passed {batch_size} batch_size, but only {len(_a)} generators.")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if isinstance(_a , torch.Generator) and batch_size > 1:
__snake_case = [generator] + [None] * (batch_size - 1)
__snake_case = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
__snake_case = [x[0] for x in coca_is_none if x[1]]
__snake_case = ', '.join(_a)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_a):
raise ValueError(
F"Content prompt is None and CoCa [{coca_is_none_str}] is None."
F"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
__snake_case = self.get_image_description(_a)
if style_prompt is None:
if len(_a):
raise ValueError(
F"Style prompt is None and CoCa [{coca_is_none_str}] is None."
F" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
__snake_case = self.get_image_description(_a)
# get prompt text embeddings for content and style
__snake_case = self.tokenizer(
_a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='pt' , )
__snake_case = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
__snake_case = self.tokenizer(
_a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='pt' , )
__snake_case = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
__snake_case = slerp(_a , _a , _a)
# duplicate text embeddings for each generation per prompt
__snake_case = text_embeddings.repeat_interleave(_a , dim=0)
# set timesteps
__snake_case = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
__snake_case = {}
if accepts_offset:
__snake_case = 1
self.scheduler.set_timesteps(_a , **_a)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
        # (note: Tensor.to is not in-place, so the result has to be kept)
        __snake_case = self.scheduler.timesteps.to(self.device)
__snake_case , __snake_case = self.get_timesteps(_a , _a , self.device)
__snake_case = timesteps[:1].repeat(_a)
# Preprocess image
__snake_case = preprocess(_a , _a , _a)
__snake_case = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a)
__snake_case = preprocess(_a , _a , _a)
__snake_case = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a)
__snake_case = slerp(_a , _a , _a)
if clip_guidance_scale > 0:
__snake_case = self.get_clip_image_embeddings(_a , _a)
__snake_case = self.get_clip_image_embeddings(_a , _a)
__snake_case = slerp(
_a , _a , _a)
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = content_text_input.input_ids.shape[-1]
__snake_case = self.tokenizer([''] , padding='max_length' , max_length=_a , return_tensors='pt')
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
__snake_case = uncond_embeddings.repeat_interleave(_a , dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__snake_case = torch.randn(_a , generator=_a , device='cpu' , dtype=_a).to(
self.device)
else:
__snake_case = torch.randn(_a , generator=_a , device=self.device , dtype=_a)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
# check if the scheduler accepts generator
__snake_case = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
__snake_case = generator
with self.progress_bar(total=_a):
for i, t in enumerate(_a):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(_a , _a)
# predict the noise residual
__snake_case = self.unet(_a , _a , encoder_hidden_states=_a).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__snake_case = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
__snake_case , __snake_case = self.cond_fn(
_a , _a , _a , _a , _a , _a , _a , )
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(_a , _a , _a , **_a).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(_a).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(_a)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a)
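# Hypothetical usage sketch for this community pipeline (the model id, the
# custom_pipeline name, and the input variables are assumptions, not taken from
# the code above):
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-base",
#       custom_pipeline="clip_guided_images_mixing_stable_diffusion",
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   out = pipe(content_image, style_image, num_inference_steps=50, clip_guidance_scale=100)
#   out.images[0].save("mixed.png")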
| 721 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    '''Translate a TF Pegasus state-dict key into the matching Hugging Face key.'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
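# Example of the substitution chain (illustrative key): a TF name such as
# "encoder/layer_0/memory_attention/output_proj/kernel" becomes
# "encoder.layers.0.encoder_attn.out_proj.weight" after the PATTERNS passes.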
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    '''Build a PegasusForConditionalGeneration and load the renamed TF weights into it.'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T  # TF stores linear kernels transposed relative to torch
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
__snake_case = tf.train.list_variables(snake_case__ )
__snake_case = {}
__snake_case = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
__snake_case = any(pat in name for pat in ignore_name )
if skip_key:
continue
__snake_case = tf.train.load_variable(snake_case__ , snake_case__ )
__snake_case = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    '''Convert a TF Pegasus checkpoint plus its tokenizer and save both under `save_dir`.'''
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
UpperCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ : int = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase__ : str = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
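# Hypothetical invocation, assuming this file is saved as
# convert_pegasus_tf_to_pytorch.py (the checkpoint path is also an assumption):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc
#
# When save_dir is omitted, the script derives it from the checkpoint's parent
# directory name, e.g. "pegasus/aeslc" for an AESLC fine-tune.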
| 676 | 0 |
def hexagonal_numbers(length: int) -> list[int]:
    '''Return the first `length` hexagonal numbers, h_n = n * (2 * n - 1).'''
    if not isinstance(length, int) or length <= 0:
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 700 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
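# Hypothetical usage sketch (the model id and custom_pipeline name are
# assumptions, not taken from the code above): the pipeline transcribes raw audio
# with Whisper and feeds the transcript to Stable Diffusion as the prompt.
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="speech_to_image_diffusion",
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   image = pipe(audio_array, sampling_rate=16_000).images[0]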
| 676 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCAmelCase__ : Any = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments( TrainingArguments ):
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to use SortishSampler or not.'} )
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        } , )
    def to_dict(self):
        '''Serialize, converting any nested GenerationConfig to a plain dict first.'''
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig):
                d[k] = v.to_dict()
        return d
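# A minimal sketch of how these fields are typically used (the output_dir and the
# numeric values are illustrative):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )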
| 701 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( lowerCamelCase__ ):
def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple:
super().__init__(*lowercase_ , **lowercase_)
__snake_case = eval_examples
__snake_case = post_process_function
def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]:
__snake_case = gen_kwargs.copy()
__snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
__snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
__snake_case = gen_kwargs
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(lowercase_)
__snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_)
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
else:
__snake_case = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_)
return metrics
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]:
__snake_case = gen_kwargs.copy()
__snake_case = self.get_test_dataloader(lowercase_)
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict')
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
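# Typical call pattern (illustrative): generation kwargs passed per call override
# the `generation_max_length` / `generation_num_beams` training arguments.
#
#   metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")
#   preds = trainer.predict(test_dataset, max_length=128, num_beams=4)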
| 676 | 0 |
def find_min(arr: list[int]) -> int:
    '''Return the minimum difference between the sums of the two subsets in a two-way partition of `arr`.'''
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i items sums to exactly j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip item i
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # or take item i
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
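# Example (illustrative): find_min([1, 6, 11, 5]) == 1, from the partition
# {1, 5, 6} vs {11} with sums 12 and 11. The table costs O(n * s) time and
# space, where s is the total sum of the array.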
| 702 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    '''A* search over a 2D grid; returns the path from init to goal and the action grid.'''
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # try out the different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
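# Note (illustrative): with a unit step cost, the Manhattan-distance heuristic
# built in the demo below never overestimates the remaining cost on a
# 4-connected grid, so the search stays admissible; the extra 99 penalty on
# obstacle cells merely steers expansion away from walls, since blocked cells
# are never entered in the first place.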
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 676 | 0 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    '''Find an approximate root of `function` in [a, b] by repeated interval halving.'''
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid

def f(x: float) -> float:
    '''Example cubic with a single real root near x = 2.0946.'''
    return x**3 - 2 * x - 5

if __name__ == "__main__":
    print(bisection(f, 1, 1000))
    import doctest
    doctest.testmod()
| 703 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
| 676 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __lowercase ( __A , unittest.TestCase ):
__UpperCAmelCase = XGLMTokenizer
__UpperCAmelCase = XGLMTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = True
def _a ( self) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case = XGLMTokenizer(lowercase_ , keep_accents=lowercase_)
tokenizer.save_pretrained(self.tmpdirname)
def _a ( self) -> Optional[int]:
__snake_case = '<pad>'
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_) , lowercase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_) , lowercase_)
def _a ( self) -> Optional[int]:
__snake_case = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowercase_) , 1_0_0_8)
def _a ( self) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8)
def _a ( self) -> List[Any]:
__snake_case = XGLMTokenizer(lowercase_ , keep_accents=lowercase_)
__snake_case = tokenizer.tokenize('This is a test')
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__snake_case = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__snake_case = tokenizer.convert_tokens_to_ids(lowercase_)
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
__snake_case = tokenizer.convert_ids_to_tokens(lowercase_)
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def _a ( self) -> Any:
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def _a ( self) -> List[str]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowercase_ , f.name)
__snake_case = XGLMTokenizer(f.name , keep_accents=lowercase_)
__snake_case = pickle.dumps(lowercase_)
pickle.loads(lowercase_)
def _a ( self) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = 'I was born in 92000, and this is falsé.'
__snake_case = tokenizer.tokenize(lowercase_)
__snake_case = rust_tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
__snake_case = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
__snake_case = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(lowercase_)
__snake_case = rust_tokenizer.encode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
@slow
def _a ( self) -> str:
__snake_case = 'Hello World!'
__snake_case = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_))
@slow
def _a ( self) -> Tuple:
__snake_case = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
__snake_case = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_))
@slow
def _a ( self) -> List[Any]:
__snake_case = {
'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='facebook/xglm-564M' , padding=lowercase_ , )
| 704 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    '''Count ordered combinations (with repetition) of items in `array` summing to `target`, naively.'''
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)
    return count_of_possible_combinations(target)

def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    '''Same count, with top-down memoization over the remaining target.'''
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)

def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    '''Same count, built bottom-up in O(n * target) time.'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]

if __name__ == "__main__":
    import doctest
    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
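# Worked example (illustrative): for array=[1, 2, 5] and target=5 the nine
# ordered combinations are 1+1+1+1+1, the four orderings of 1+1+1+2, the three
# orderings of 1+2+2, and 5 itself, so every variant above returns 9.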
| 676 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=3 , lowercase_=3_2 , lowercase_=3 , lowercase_=1_0 , lowercase_=[1_0, 2_0, 3_0, 4_0] , lowercase_=[1, 1, 2, 1] , lowercase_=True , lowercase_=True , lowercase_="relu" , lowercase_=3 , lowercase_=None , ) -> Tuple:
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = embeddings_size
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_act
__snake_case = num_labels
__snake_case = scope
__snake_case = len(_A)
def _a ( self) -> Optional[int]:
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__snake_case = self.get_config()
return config, pixel_values
def _a ( self) -> Dict:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _a ( self , lowercase_ , lowercase_) -> int:
__snake_case = FlaxRegNetModel(config=_A)
__snake_case = model(_A)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def _a ( self , lowercase_ , lowercase_) -> Union[str, Any]:
__snake_case = self.num_labels
__snake_case = FlaxRegNetForImageClassification(config=_A)
__snake_case = model(_A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _a ( self) -> List[str]:
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case = config_and_inputs
__snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __lowercase ( __lowercase , unittest.TestCase ):
__UpperCAmelCase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> Any:
__snake_case = FlaxRegNetModelTester(self)
__snake_case = ConfigTester(self , config_class=_A , has_text_modality=_A)
def _a ( self) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self) -> str:
return
def _a ( self) -> Any:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A)
def _a ( self) -> Any:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A)
@unittest.skip(reason='RegNet does not use inputs_embeds')
def _a ( self) -> Optional[Any]:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings')
def _a ( self) -> int:
pass
def _a ( self) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(_A)
__snake_case = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A)
def _a ( self) -> Any:
def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_):
__snake_case = model_class(_A)
__snake_case = model(**self._prepare_for_class(_A , _A))
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(_A) , expected_num_stages + 1)
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(_A , _A , _A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(_A , _A , _A)
def _a ( self) -> Dict:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
__snake_case = self._prepare_for_class(_A , _A)
__snake_case = model_class(_A)
@jax.jit
def model_jitted(lowercase_ , **lowercase_):
return model(pixel_values=_A , **_A)
with self.subTest('JIT Enabled'):
__snake_case = model_jitted(**_A).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
__snake_case = model_jitted(**_A).to_tuple()
self.assertEqual(len(_A) , len(_A))
for jitted_output, output in zip(_A , _A):
self.assertEqual(jitted_output.shape , output.shape)
def A ( ) -> Optional[int]:
'''simple docstring'''
__snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class __lowercase ( unittest.TestCase ):
@cached_property
def _a ( self) -> Optional[int]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040') if is_vision_available() else None
@slow
def _a ( self) -> Union[str, Any]:
__snake_case = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=_A , return_tensors='np')
__snake_case = model(**_A)
# verify the logits
__snake_case = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , _A)
__snake_case = jnp.array([-0.4180, -1.5051, -3.4836])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _A , atol=1e-4))
| 705 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> List[str]:
__snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase_) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0)
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
    def test_factory(self):
        import faiss
        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory='Flat', custom_index=faiss.IndexFlat(5))
    def test_custom(self):
        import faiss
        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
    def test_serialization(self):
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    '''Check that a FaissIndex round-trips through an fsspec filesystem (via the `mockfs` pytest fixture).'''
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = 'index.faiss'
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
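# A minimal usage sketch of the `datasets` FAISS integration exercised by the
# tests above (a sketch, not part of the test suite; column name and query are
# illustrative):
#
#   import numpy as np
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"id": list(range(30))})
#   ds = ds.map(lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True)
#   ds.add_faiss_index(column="vecs")
#   scores, examples = ds.get_nearest_examples("vecs", np.ones(5, dtype=np.float32), k=1)
#   ds.drop_index("vecs")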
| 676 | 0 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against one or more (human-produced) reference summaries or translations.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
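# A minimal sketch of what the wrapper above delegates to: scoring a single
# prediction/reference pair directly with `rouge_score` (strings are illustrative;
# `rouge_scorer` is already imported above).
def _demo_rouge_pair(prediction='hello there', reference='hello there'):
    demo_scorer = rouge_scorer.RougeScorer(rouge_types=['rouge1', 'rougeL'], use_stemmer=False)
    # note the argument order, (target, prediction), matching the loop above
    return demo_scorer.score(reference, prediction)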
| 706 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    '''Compute WER/CER on the decoded dataset, print them, and optionally dump predictions/targets to log files.'''
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])
    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')
    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", 'w') as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f"{i}" + '\n')
                t.write(batch['target'] + '\n')
            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    '''Normalize a target transcription: lower-case, strip punctuation, collapse whitespace.'''
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))
    return text
def main(args):
    '''Run the ASR pipeline on the requested dataset split and log WER/CER.'''
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)
    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
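# Example invocation (script name and model/dataset identifiers are illustrative):
#
#   python eval.py --model_id hf-test/xls-r-dummy \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test \
#       --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs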
| 676 | 0 |
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    '''Return True if no already-placed queen attacks square (row, column).'''
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    '''Place queens row by row with backtracking; record and print every full placement.'''
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    '''Pretty-print a board: Q for a queen, . for an empty square.'''
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 707 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    '''solves the multi-process interleaved print problem'''
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
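# Optional stricter check (a sketch): after a SUM all-reduce, the tensor should
# equal the world size on every rank.
#
#   t = torch.ones(1).to(device)
#   dist.all_reduce(t, op=dist.ReduceOp.SUM)
#   assert t.item() == world_size, f"all_reduce mismatch: {t.item()} != {world_size}"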
| 676 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = "Hello world! cécé herlolip"
def A ( snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Optional[int] ) -> Tuple:
'''simple docstring'''
__snake_case = FairseqRobertaModel.from_pretrained(_snake_case )
roberta.eval() # disable dropout
__snake_case = roberta.model.encoder.sentence_encoder
__snake_case = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
__snake_case = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , _snake_case )
__snake_case = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
__snake_case = roberta_sent_encoder.embed_tokens.weight
__snake_case = roberta_sent_encoder.embed_positions.weight
__snake_case = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__snake_case = roberta_sent_encoder.layer_norm.weight
__snake_case = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__snake_case = model.roberta.encoder.layer[i]
__snake_case = roberta_sent_encoder.layers[i]
__snake_case = layer.attention
__snake_case = roberta_layer.self_attn_layer_norm.weight
__snake_case = roberta_layer.self_attn_layer_norm.bias
# self attention
__snake_case = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__snake_case = roberta_layer.self_attn.q_proj.weight
__snake_case = roberta_layer.self_attn.q_proj.bias
__snake_case = roberta_layer.self_attn.k_proj.weight
__snake_case = roberta_layer.self_attn.k_proj.bias
__snake_case = roberta_layer.self_attn.v_proj.weight
__snake_case = roberta_layer.self_attn.v_proj.bias
# self-attention output
__snake_case = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__snake_case = roberta_layer.self_attn.out_proj.weight
__snake_case = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__snake_case = roberta_layer.final_layer_norm.weight
__snake_case = roberta_layer.final_layer_norm.bias
# intermediate
__snake_case = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__snake_case = roberta_layer.fca.weight
__snake_case = roberta_layer.fca.bias
# output
__snake_case = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__snake_case = roberta_layer.fca.weight
__snake_case = roberta_layer.fca.bias
# end of layer
if classification_head:
__snake_case = roberta.model.classification_heads['mnli'].dense.weight
__snake_case = roberta.model.classification_heads['mnli'].dense.bias
__snake_case = roberta.model.classification_heads['mnli'].out_proj.weight
__snake_case = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__snake_case = roberta.model.encoder.lm_head.dense.weight
__snake_case = roberta.model.encoder.lm_head.dense.bias
__snake_case = roberta.model.encoder.lm_head.layer_norm.weight
__snake_case = roberta.model.encoder.lm_head.layer_norm.bias
__snake_case = roberta.model.encoder.lm_head.weight
__snake_case = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads['mnli'](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape , their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3)
    print('Do both models output the same tensors?' , '🔥' if success else '💩')
    if not success:
        raise Exception('Something went wRoNg')
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True , exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
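# Example invocation (paths are illustrative):
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl \
#       --classification_head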
| 708 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    '''Resolve the direct video source via downloadgram and return the raw bytes.'''
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilenet_v2'] = ['MobileNetV2FeatureExtractor']
    _import_structure['image_processing_mobilenet_v2'] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilenet_v2'] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
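# With the lazy module in place, a user-facing import such as the following only
# materializes the heavy submodule on first attribute access (a sketch; assumes
# `transformers` with torch installed):
#
#   from transformers import MobileNetV2Config, MobileNetV2Model
#   model = MobileNetV2Model(MobileNetV2Config())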
| 709 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def _a ( self) -> Union[str, Any]:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = OpenLlamaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
def _a ( self) -> Optional[Any]:
__snake_case = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = config_and_inputs
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__UpperCAmelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
    def _a ( self) -> Tuple:
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=3_7)
def _a ( self) -> int:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'single_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'multi_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _a ( self) -> List[Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def _a ( self , lowercase_) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = ids_tensor([1, 1_0] , config.vocab_size)
__snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = OpenLlamaModel(lowercase_)
original_model.to(lowercase_)
original_model.eval()
__snake_case = original_model(lowercase_).last_hidden_state
__snake_case = original_model(lowercase_).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = {'type': scaling_type, 'factor': 10.0}
__snake_case = OpenLlamaModel(lowercase_)
scaled_model.to(lowercase_)
scaled_model.eval()
__snake_case = scaled_model(lowercase_).last_hidden_state
__snake_case = scaled_model(lowercase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
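# A sketch of the RoPE-scaling configuration exercised by the last test above
# (values are illustrative):
#
#   config = OpenLlamaConfig(rope_scaling={'type': 'dynamic', 'factor': 10.0})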
| 676 | 0 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    '''Fetch a single story item from the Hacker News API.'''
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    '''Get the top `max_stories` posts from Hacker News - https://news.ycombinator.com/'''
    url = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join('* [{title}]({url})'.format(**story) for story in stories)
if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
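# Example (requires network access; the output line is illustrative):
#
#   print(hackernews_top_stories_as_markdown(3))
#   # * [Some story title](https://example.com/story)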
| 710 |
def is_automorphic_number(number: int) -> bool:
    '''Return True if `number` is automorphic, i.e. its square ends in the number itself.'''
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
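# Worked examples (assume the definition above): 5**2 = 25, 76**2 = 5776 and
# 376**2 = 141376 all end in the original number, so these are automorphic.
#
#   assert is_automorphic_number(5)
#   assert is_automorphic_number(76)
#   assert not is_automorphic_number(7)  # 49 does not end in 7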
| 676 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowercase ( PretrainedConfig ):
__UpperCAmelCase = '''gpt_neo'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , lowercase_=5_0_2_5_7 , lowercase_=2_0_4_8 , lowercase_=2_0_4_8 , lowercase_=2_4 , lowercase_=[[["global", "local"], 1_2]] , lowercase_=1_6 , lowercase_=None , lowercase_=2_5_6 , lowercase_="gelu_new" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_=1e-5 , lowercase_=0.02 , lowercase_=True , lowercase_=5_0_2_5_6 , lowercase_=5_0_2_5_6 , **lowercase_ , ) -> Optional[int]:
__snake_case = vocab_size
__snake_case = max_position_embeddings
__snake_case = hidden_size
__snake_case = num_layers
__snake_case = num_heads
__snake_case = intermediate_size
__snake_case = window_size
__snake_case = activation_function
__snake_case = resid_dropout
__snake_case = embed_dropout
__snake_case = attention_dropout
__snake_case = classifier_dropout
__snake_case = layer_norm_epsilon
__snake_case = initializer_range
__snake_case = use_cache
__snake_case = bos_token_id
__snake_case = eos_token_id
__snake_case = attention_types
__snake_case = self.expand_attention_types_params(_A)
if len(self.attention_layers) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
F"`config.num_layers = {self.num_layers}`. "
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.')
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A)
@staticmethod
def _a ( lowercase_) -> List[str]:
__snake_case = []
for item in attention_types:
for _ in range(item[1]):
attentions.extend(item[0])
return attentions
def custom_unfold(input, dimension, size, step):
    '''Custom torch.Tensor.unfold implementation to enable the export to ONNX.'''
    import torch
    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    '''Custom implementation of the local-attention block-length helper to enable the export to ONNX.'''
    import torch
    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
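# A quick sanity check (a sketch) that the ONNX-friendly unfold above matches
# torch.Tensor.unfold on a small example:
#
#   import torch
#   x = torch.arange(10, dtype=torch.float).reshape(1, 10)
#   assert torch.equal(custom_unfold(x, 1, 4, 2), x.unfold(1, 4, 2))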
class __lowercase ( OnnxConfigWithPast ):
@property
def _a ( self) -> int:
__snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs')
__snake_case = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__snake_case = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def _a ( self) -> Dict:
return self._config.num_heads
def _a ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> int:
__snake_case = super(_A , self).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A)
# We need to order the input in the way they appears in the forward()
__snake_case = OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
__snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__snake_case = seqlen + 2
__snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__snake_case = [
(torch.zeros(_A), torch.zeros(_A)) for _ in range(self.num_layers)
]
__snake_case = common_inputs['attention_mask']
if self.use_past:
__snake_case = ordered_inputs['attention_mask'].dtype
__snake_case = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A)] , dim=1)
return ordered_inputs
@property
def _a ( self) -> Tuple:
return 1_3
| 711 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    '''Sigmoid activation: 1 / (1 + exp(-x)), applied element-wise.'''
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    '''SiLU (swish) activation: x * sigmoid(x), applied element-wise.'''
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
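# Worked values (assume the definitions above): sigmoid(0) = 0.5, so the
# sigmoid linear unit satisfies silu(0) = 0 and silu(x) -> x for large positive x.
#
#   print(sigmoid(np.array([0.0])))                    # [0.5]
#   print(sigmoid_linear_unit(np.array([0.0, 5.0])))   # [0.      4.9665...]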
| 676 | 0 |
def sum_digits(num: int) -> int:
    '''Return the sum of the decimal digits of `num`.'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    '''Project Euler 65: digit sum of the numerator of the `max_n`-th convergent of e.'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F"""{solution() = }""")
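# The continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], giving
# convergents 2, 3, 8/3, 11/4, 19/7, 87/32, ... The 10th convergent has
# numerator 1457, so (assuming the definitions above):
#
#   assert solution(10) == 1 + 4 + 5 + 7  # == 17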
| 712 |
def lucas_lehmer_test(p: int) -> bool:
    '''Lucas-Lehmer primality test: return True iff the Mersenne number 2**p - 1 is prime.'''
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
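# Worked example (assumes the definition above): for p = 7 the Lucas-Lehmer
# sequence mod 2**7 - 1 = 127 runs 4 -> 14 -> 67 -> 42 -> 111 -> 0, so 127 is
# prime and the first print is True; for p = 11, s never reaches 0 modulo
# 2047 = 23 * 89, so the second print is False.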
| 676 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCAmelCase__ : List[str] = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments ( TrainingArguments ):
    label_smoothing : Optional[float] = field(
        default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    sortish_sampler : bool = field(default=False , metadata={'help': 'Whether to SortishSamler or not.'} )
    predict_with_generate : bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    adafactor : bool = field(default=False , metadata={'help': 'whether to use adafactor'} )
    encoder_layerdrop : Optional[float] = field(
        default=None , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    decoder_layerdrop : Optional[float] = field(
        default=None , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    dropout : Optional[float] = field(default=None , metadata={'help': 'Dropout probability. Goes into model.config.'} )
    attention_dropout : Optional[float] = field(
        default=None , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    lr_scheduler : Optional[str] = field(
        default='linear' , metadata={'help': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
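# Usage sketch (argument values are illustrative):
#
#   training_args = Seq2SeqTrainingArguments(
#       output_dir='outputs', sortish_sampler=True, predict_with_generate=True,
#       label_smoothing=0.1, lr_scheduler='cosine',
#   )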
| 713 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    '''Prim's minimum spanning tree, driven by a position-tracking binary heap; returns the tree edges.'''
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list) # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = [] # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
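# A tiny worked example (assumes the definitions above): a triangle graph with
# weights 1 (0-1), 2 (1-2) and 3 (0-2) has minimum spanning tree {(0, 1), (1, 2)}.
#
#   demo = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#       demo[u].append([v, w])
#       demo[v].append([u, w])
#   print(prisms_algorithm(demo))  # [(0, 1), (1, 2)]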
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
def A ( snake_case__ : Optional[Any] , snake_case__ : List[str] ) -> Optional[int]:
'''simple docstring'''
__snake_case = int(lowerCamelCase__ )
assert noofclusters < len(lowerCamelCase__ )
# Find out the dimensionality
__snake_case = len(vectors[0] )
# Will help select random centroids from among the available vectors
__snake_case = list(range(len(lowerCamelCase__ ) ) )
shuffle(lowerCamelCase__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__snake_case = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__snake_case = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__snake_case = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowerCamelCase__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__snake_case = tf.placeholder('float64' , [dim] )
__snake_case = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowerCamelCase__ , lowerCamelCase__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__snake_case = [tf.Variable(0 ) for i in range(len(lowerCamelCase__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__snake_case = tf.placeholder('int32' )
__snake_case = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowerCamelCase__ , lowerCamelCase__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__snake_case = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__snake_case = tf.reduce_mean(lowerCamelCase__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__snake_case = tf.placeholder('float' , [dim] )
__snake_case = tf.placeholder('float' , [dim] )
__snake_case = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowerCamelCase__ , lowerCamelCase__ ) , 2 ) ) )
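        # NOTE: tf.sub (here) and tf.initialize_all_variables (below) are
        # pre-1.0 TensorFlow APIs; on TF 1.x use tf.subtract and
        # tf.global_variables_initializer instead.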
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__snake_case = tf.placeholder('float' , [noofclusters] )
__snake_case = tf.argmin(lowerCamelCase__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__snake_case = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowerCamelCase__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__snake_case = 100
for _ in range(lowerCamelCase__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowerCamelCase__ ) ):
__snake_case = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__snake_case = [
sess.run(lowerCamelCase__ , feed_dict={va: vect, va: sess.run(lowerCamelCase__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__snake_case = sess.run(
lowerCamelCase__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowerCamelCase__ ):
# Collect all the vectors assigned to this cluster
__snake_case = [
vectors[i]
for i in range(len(lowerCamelCase__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__snake_case = sess.run(
lowerCamelCase__ , feed_dict={mean_input: array(lowerCamelCase__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__snake_case = sess.run(lowerCamelCase__ )
__snake_case = sess.run(lowerCamelCase__ )
return centroids, assignments
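# Usage sketch: the routine expects a list of equal-length vectors and a cluster
# count, and needs TensorFlow 1.x since it relies on tf.Session and
# tf.placeholder. The sample points below are illustrative:
#
#   points = [array([1.0, 1.0]), array([1.2, 0.8]), array([9.0, 9.0]), array([8.8, 9.3])]
#   centroids, assignments = TFKMeansCluster(points, 2)
#   print(centroids)    # one centroid near (1, 1), one near (9, 9)
#   print(assignments)  # e.g. [0, 0, 1, 1]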
| 715 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url: str) -> SwinaSRConfig:
'''simple docstring'''
__snake_case = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__snake_case = 4
__snake_case = 48
__snake_case = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = [6, 6, 6, 6]
__snake_case = 60
__snake_case = [6, 6, 6, 6]
__snake_case = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = 4
__snake_case = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__snake_case = 1
__snake_case = 1
__snake_case = 126
__snake_case = 7
__snake_case = 255.0
__snake_case = ''
return config
def rename_key(name, config):
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
__snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__snake_case = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__snake_case = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "conv_first" in name:
__snake_case = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__snake_case = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__snake_case = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__snake_case = name.replace('upsample.2' , 'upsample.convolution_1' )
__snake_case = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__snake_case = 'swin2sr.' + name
return name
def convert_state_dict(orig_state_dict, config):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
if "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[4] )
__snake_case = config.embed_dim
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val
return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
'''simple docstring'''
__snake_case = get_config(snake_case__ )
__snake_case = SwinaSRForImageSuperResolution(snake_case__ )
model.eval()
__snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )
__snake_case = convert_state_dict(snake_case__ , snake_case__ )
__snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
__snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
__snake_case = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__snake_case = 126 if 'Jpeg' in checkpoint_url else 256
__snake_case = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case = transforms(snake_case__ ).unsqueeze(0 )
if config.num_channels == 1:
__snake_case = pixel_values[:, 0, :, :].unsqueeze(1 )
__snake_case = model(snake_case__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 )
print('Looks ok!' )
__snake_case = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__snake_case = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
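# Example invocation (the script filename is illustrative; the URL is one of
# the five checkpoints mapped in url_to_name above):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#       --push_to_hub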
| 676 | 0 |
def find_min(arr: list[int]) -> int:
    """Returns the minimum possible difference between the sums of the two
    sides of a partition of ``arr``."""
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True  # the empty subset always reaches sum 0
    for i in range(1, s + 1):
        dp[0][i] = False  # no positive sum is reachable with zero items
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip item i
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # or take it
    diff = s  # fallback; always overwritten, since dp[n][0] is True
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
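# Worked example: for arr = [1, 6, 11, 5] the total is 23 and the most balanced
# split is {1, 5, 6} (sum 12) against {11} (sum 11), so the call returns 1:
#
#   print(find_min([1, 6, 11, 5]))  # 1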
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowercase ( __lowerCAmelCase ):
@slow
@require_torch
def _a ( self) -> Optional[Any]:
__snake_case = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny')
__snake_case = BertTokenizer.from_pretrained('bert-base-uncased')
__snake_case = bertabert.config.encoder.vocab_size
__snake_case = tokenizer.sep_token_id
__snake_case = tokenizer.cls_token_id
__snake_case = 1_2_8
__snake_case = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]')
__snake_case = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]')
__snake_case = train_dataset.select(range(3_2))
__snake_case = val_dataset.select(range(1_6))
__snake_case = 4
def _map_to_encoder_decoder_inputs(lowercase_):
# Tokenizer will automatically set [BOS] <text> [EOS]
__snake_case = tokenizer(batch['article'] , padding='max_length' , truncation=lowerCAmelCase_ , max_length=5_1_2)
__snake_case = tokenizer(batch['highlights'] , padding='max_length' , truncation=lowerCAmelCase_ , max_length=1_2_8)
__snake_case = inputs.input_ids
__snake_case = inputs.attention_mask
__snake_case = outputs.input_ids
__snake_case = outputs.input_ids.copy()
__snake_case = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
__snake_case = outputs.attention_mask
assert all(len(lowerCAmelCase_) == 5_1_2 for x in inputs.input_ids)
assert all(len(lowerCAmelCase_) == 1_2_8 for x in outputs.input_ids)
return batch
def _compute_metrics(lowercase_):
__snake_case = pred.label_ids
__snake_case = pred.predictions
# all unnecessary tokens are removed
__snake_case = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_)
__snake_case = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_)
__snake_case = sum([int(pred_str[i] == label_str[i]) for i in range(len(lowerCAmelCase_))]) / len(lowerCAmelCase_)
return {"accuracy": accuracy}
# map train dataset
__snake_case = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
__snake_case = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
__snake_case = self.get_auto_remove_tmp_dir()
__snake_case = SeqaSeqTrainingArguments(
output_dir=lowerCAmelCase_ , per_device_train_batch_size=lowerCAmelCase_ , per_device_eval_batch_size=lowerCAmelCase_ , predict_with_generate=lowerCAmelCase_ , evaluation_strategy='steps' , do_train=lowerCAmelCase_ , do_eval=lowerCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__snake_case = SeqaSeqTrainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=lowerCAmelCase_ , eval_dataset=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , )
# start training
trainer.train()
| 717 |
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display(tree: Node | None) -> None:
    """In-order traversal of the tree."""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree(tree: Node | None) -> int:
    """Returns the depth (height) of the tree."""
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree(tree: Node) -> bool:
    """Returns True if every node has either zero or two children."""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def main() -> None:
    """Main function for testing: builds a demo tree and reports its properties."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.right.left = Node(6)
    tree.right.right = Node(7)
    tree.left.left.left = Node(8)
    tree.left.left.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)
if __name__ == "__main__":
main()
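# Expected output for the demo tree built in main(): True (the tree is full),
# 4 (its depth), then the in-order sequence 8 4 9 2 5 1 6 3 7, one value per line.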
| 676 | 0 |
def solution(n: int = 1000) -> int:
    """Returns the largest product a*b*c over Pythagorean triplets (a, b, c)
    with a + b + c == n, or -1 if no such triplet exists."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(F"""{solution() = }""")
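# For the default n = 1000 there is exactly one Pythagorean triplet with
# a + b + c == 1000, namely (200, 375, 425), so solution() returns 31875000.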
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''table-transformer'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=1_0_0 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=2_5_6 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
__snake_case = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(lowercase_ , lowercase_):
__snake_case = backbone_config.get('model_type')
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(lowercase_)
# set timm attributes to None
__snake_case , __snake_case , __snake_case = None, None, None
__snake_case = use_timm_backbone
__snake_case = backbone_config
__snake_case = num_channels
__snake_case = num_queries
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = init_xavier_std
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = encoder_layers
__snake_case = auxiliary_loss
__snake_case = position_embedding_type
__snake_case = backbone
__snake_case = use_pretrained_backbone
__snake_case = dilation
# Hungarian matcher
__snake_case = class_cost
__snake_case = bbox_cost
__snake_case = giou_cost
# Loss coefficients
__snake_case = mask_loss_coefficient
__snake_case = dice_loss_coefficient
__snake_case = bbox_loss_coefficient
__snake_case = giou_loss_coefficient
__snake_case = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def _a ( self) -> int:
return self.encoder_attention_heads
@property
def _a ( self) -> int:
return self.d_model
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = version.parse('''1.11''' )
@property
def _a ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
])
@property
def _a ( self) -> float:
return 1e-5
@property
def _a ( self) -> int:
return 1_2
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ : Optional[Any] = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
UpperCAmelCase__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Returns number + 2 if both number and number + 2 are prime, otherwise -1.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(10_00_00)]
def next_number(number: int) -> int:
    """Returns the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
UpperCAmelCase__ : str = CHAINS = [None] * 10_00_00_00
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 falls into the 89-cycle
def chain(number: int) -> bool:
    """Returns True if the chain starting at ``number`` ends at 1 and False if
    it ends in the 89-cycle, caching the result for ``number`` and its
    powers-of-ten multiples."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 1000_0000) -> int:
    """Counts how many chains started below ``number`` end in the 89-cycle."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
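# Every chain eventually reaches 1 or falls into the 89-cycle; for the default
# limit of 10_000_000, solution() counts 8_581_146 numbers arriving at 89.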
| 720 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def A ( snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Any ) -> Optional[int]:
'''simple docstring'''
__snake_case = hf_hub_url(repo_id=snake_case__ , path=snake_case__ , revision=snake_case__ )
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(snake_case__ )}"
| 676 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def A ( snake_case__ : Namespace ) -> Optional[int]:
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
UpperCAmelCase__ : int = """
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class __lowercase ( _snake_case ):
@staticmethod
def _a ( lowercase_) -> Tuple:
__snake_case = parser.add_parser(
'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , )
train_parser.add_argument('--model_type' , type=snake_case_ , required=snake_case_ , help='Model\'s type.')
train_parser.add_argument(
'--tf_checkpoint' , type=snake_case_ , required=snake_case_ , help='TensorFlow checkpoint path or folder.')
train_parser.add_argument(
'--pytorch_dump_output' , type=snake_case_ , required=snake_case_ , help='Path to the PyTorch saved model output.')
train_parser.add_argument('--config' , type=snake_case_ , default='' , help='Configuration file path or folder.')
train_parser.add_argument(
'--finetuning_task_name' , type=snake_case_ , default=snake_case_ , help='Optional fine-tuning task name if the TF model was a finetuned model.' , )
train_parser.set_defaults(func=snake_case_)
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , *lowercase_ , ) -> Tuple:
__snake_case = logging.get_logger('transformers-cli/converting')
self._logger.info(F"Loading model {model_type}")
__snake_case = model_type
__snake_case = tf_checkpoint
__snake_case = pytorch_dump_output
__snake_case = config
__snake_case = finetuning_task_name
def _a ( self) -> Any:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(snake_case_)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_)
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file)
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
        else:
            raise ValueError(
                '--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]')
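# Example invocation (paths are illustrative):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin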
| 721 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
__snake_case = k.replace(snake_case__ , snake_case__ )
return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
'''simple docstring'''
__snake_case = DEFAULTS.copy()
cfg_kwargs.update(snake_case__ )
__snake_case = PegasusConfig(**snake_case__ )
__snake_case = PegasusForConditionalGeneration(snake_case__ )
__snake_case = torch_model.model.state_dict()
__snake_case = {}
for k, v in tf_weights.items():
__snake_case = rename_state_dict_key(snake_case__ )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
__snake_case = v.T
__snake_case = torch.tensor(snake_case__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
__snake_case = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
__snake_case = mapping['shared.weight']
__snake_case = mapping['shared.weight']
__snake_case = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**snake_case__ )
__snake_case , __snake_case = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ )
__snake_case = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def get_tf_weights_as_numpy(path: str = "./ckpt/aeslc/model.ckpt-32000") -> Dict:
'''simple docstring'''
__snake_case = tf.train.list_variables(snake_case__ )
__snake_case = {}
__snake_case = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
__snake_case = any(pat in name for pat in ignore_name )
if skip_key:
continue
__snake_case = tf.train.load_variable(snake_case__ , snake_case__ )
__snake_case = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
'''simple docstring'''
# save tokenizer first
__snake_case = Path(snake_case__ ).parent.name
__snake_case = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
__snake_case = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case__ )
# convert model
__snake_case = get_tf_weights_as_numpy(snake_case__ )
__snake_case = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
__snake_case = task_specific_params
__snake_case = convert_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
__snake_case = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
UpperCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ : int = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase__ : str = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
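# Example invocation (the script filename and local paths are illustrative; the
# two positional arguments are the TF checkpoint prefix and the output folder):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc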
| 676 | 0 |
def least_divisible_repunit(divisor: int) -> int:
    """Returns the least k for which the repunit R(k) = (10**k - 1) // 9 is
    divisible by ``divisor``, or 0 when divisor shares a factor with 10."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 100_0000) -> int:
    """Returns the least n coprime to 10 for which A(n), the length of the
    smallest repunit divisible by n, first exceeds ``limit``."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
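# Since A(n) <= n, the answer must exceed the limit itself; for the default
# limit of 1_000_000 the search stops at 1_000_023.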
| 700 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
| 676 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def A ( ) -> str:
'''simple docstring'''
__snake_case = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
__snake_case = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
__snake_case = parser.parse_args()
if not hasattr(_lowerCAmelCase , 'func' ):
parser.print_help()
exit(1 )
# Run
__snake_case = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 701 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( lowerCamelCase__ ):
def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple:
super().__init__(*lowercase_ , **lowercase_)
__snake_case = eval_examples
__snake_case = post_process_function
def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]:
__snake_case = gen_kwargs.copy()
__snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
__snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
__snake_case = gen_kwargs
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(lowercase_)
__snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_)
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
else:
__snake_case = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_)
return metrics
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]:
__snake_case = gen_kwargs.copy()
__snake_case = self.get_test_dataloader(lowercase_)
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict')
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
| 676 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
'''simple docstring'''
__snake_case = os.getenv('SM_HP_MP_PARAMETERS' , '{}' )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
__snake_case = json.loads(lowerCamelCase_ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
__snake_case = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
__snake_case = json.loads(lowerCamelCase_ )
if not mpi_options.get('sagemaker_mpi_enabled' , lowerCamelCase_ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec('smdistributed' ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def _a ( self) -> Optional[int]:
super().__post_init__()
warnings.warn(
'`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
'`TrainingArguments` instead.' , __lowerCamelCase , )
@cached_property
def _a ( self) -> "torch.device":
logger.info('PyTorch: setting up devices')
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'torch.distributed process group is initialized, but local_rank == -1. '
'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch')
if self.no_cuda:
__snake_case = torch.device('cpu')
__snake_case = 0
elif is_sagemaker_model_parallel_available():
__snake_case = smp.local_rank()
__snake_case = torch.device('cuda' , __lowerCamelCase)
__snake_case = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta)
__snake_case = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK'))
__snake_case = torch.device('cuda' , self.local_rank)
__snake_case = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
__snake_case = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
__snake_case = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta)
__snake_case = torch.device('cuda' , self.local_rank)
__snake_case = 1
if device.type == "cuda":
torch.cuda.set_device(__lowerCamelCase)
return device
@property
def _a ( self) -> Any:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def _a ( self) -> Tuple:
return not is_sagemaker_model_parallel_available()
@property
def _a ( self) -> int:
return False
| 702 |
from __future__ import annotations
UpperCAmelCase__ : Dict = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand any further

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
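
    # Design note: the open list in search() is re-sorted and reversed on
    # every expansion (O(n log n) per pop). A binary heap gives the same
    # least-f-cost pop in O(log n); a minimal sketch of the substitution,
    # using the same [f, g, x, y] entry shape:
    import heapq

    open_list = [[7, 0, 0, 0], [5, 1, 1, 0]]   # same shape as `cell` above
    heapq.heapify(open_list)
    f, g, x, y = heapq.heappop(open_list)      # replaces sort(); reverse(); pop()
    heapq.heappush(open_list, [6, 1, 0, 1])    # replaces cell.append([fa, ga, xa, ya])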
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ : Optional[Any] = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[str] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
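
# For reference, `_LazyModule` defers the heavy torch imports above until an
# attribute is first accessed. A toy, self-contained sketch of the same idea
# (independent of the transformers helper; names here are illustrative):
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_submodule):
        super().__init__(name)
        self._attr_to_submodule = attr_to_submodule

    def __getattr__(self, attr):
        try:
            submodule = self._attr_to_submodule[attr]
        except KeyError:
            raise AttributeError(attr)
        # Import the owning submodule only now, then cache the attribute so
        # later lookups bypass __getattr__ entirely.
        value = getattr(importlib.import_module(submodule), attr)
        setattr(self, attr, value)
        return value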
| 703 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
    def analyze_directory(self , directory , identifier = None , ignore_files = None , n_identifier = None , only_modules = True , ) -> None:
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory , file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier , (list, tuple)):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing' , file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers , module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures) , 0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path('..') / directory / file) , optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed , 0)
    def test_modeling_files(self) -> None:
        directory = Path('src/transformers')
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files)

    def test_tokenization_files(self) -> None:
        directory = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(directory , identifier=identifier)

    def test_configuration_files(self) -> None:
        directory = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(directory , identifier=identifier)

    def test_files(self) -> None:
        directory = Path('src/transformers')
        n_identifier = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory , n_identifier=n_identifier)

    def test_docs(self) -> None:
        directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False)
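
    # The walker above uses doctest in two modes; a short reminder of both
    # APIs for orientation (module/file names below are placeholders):
    #
    #     suite = doctest.DocTestSuite(some_module)            # docstring examples
    #     result = unittest.TextTestRunner().run(suite)
    #     assert len(result.failures) == 0
    #
    #     result = doctest.testfile('docs/example.md', optionflags=doctest.ELLIPSIS)
    #     assert result.failed == 0                            # file-embedded examples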
| 676 | 0 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key , default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no." )
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case ):
    return unittest.skip('Test was skipped' )(test_case )


def slow(test_case ):
    return unittest.skipUnless(_run_slow_tests , 'test is slow' )(test_case )


def require_cpu(test_case ):
    return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(test_case )


def require_cuda(test_case ):
    return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(test_case )


def require_xpu(test_case ):
    return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(test_case )


def require_mps(test_case ):
    return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(test_case )


def require_huggingface_suite(test_case ):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(test_case )


def require_bnb(test_case ):
    return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(test_case )


def require_tpu(test_case ):
    return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(test_case )


def require_single_gpu(test_case ):
    return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(test_case )


def require_single_xpu(test_case ):
    return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(test_case )


def require_multi_gpu(test_case ):
    return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(test_case )


def require_multi_xpu(test_case ):
    return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(test_case )


def require_safetensors(test_case ):
    return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(test_case )


def require_deepspeed(test_case ):
    return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(test_case )


def require_fsdp(test_case ):
    return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(test_case )


def require_torch_min_version(test_case=None , version=None ):
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('>=' , version ) , f"test requires torch version >= {version}" )(test_case )


def require_tensorboard(test_case ):
    return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(test_case )


def require_wandb(test_case ):
    return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(test_case )


def require_comet_ml(test_case ):
    return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(test_case )


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case ):
    return unittest.skipUnless(
        _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(test_case )
class TempDirTestCase(unittest.TestCase ):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls) -> None:
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls) -> None:
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self) -> None:
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase ):
    def tearDown(self) -> None:
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase ):
    def add_mocks(self , mocks) -> None:
        self.mocks = mocks if isinstance(mocks , (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor ) -> bool:
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    def __init__( self , returncode , stdout , stderr) -> None:
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback ) -> None:
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )

    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )

    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}" )

    return result
return result
class SubprocessCallException(Exception ):
    pass


def run_command(command , return_stdout=False ):
    """Run `command` with subprocess.check_output, optionally returning stdout and
    surfacing any error that occurred while running it."""
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output = output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 704 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Naive recursion: counts every ordered combination (exponential time)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_with_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down memoization over a dp_array indexed by the remaining target."""

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up tabulation: dp_array[i] counts ordered combinations summing to i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
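
# The bottom-up table above implements the recurrence
# dp[i] = sum(dp[i - a] for a in array if a <= i), with dp[0] = 1.
# A hedged alternative sketch of the memoized top-down version using
# functools.lru_cache instead of a hand-rolled table:
from functools import lru_cache


def combination_sum_iv_cached(array: list[int], target: int) -> int:
    @lru_cache(maxsize=None)
    def count(remaining: int) -> int:
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        return sum(count(remaining - item) for item in array)

    return count(target)


assert combination_sum_iv_cached([1, 2, 5], 5) == 9  # matches the demo above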
| 676 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )

    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )

    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )

    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
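
# Hypothetical invocation of this launcher (script name and flags are
# illustrative); it imports the training script as a module, patches
# sys.argv, and forks one process per TPU core:
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
#
# The training script only needs to expose a `_mp_fn(index)` entry point for
# `xmp.spawn` to call in each child process.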
| 705 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase ):
def _a ( self) -> List[str]:
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0)
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class FaissIndexTest(TestCase ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def A ( snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__snake_case = 'index.faiss'
__snake_case = f"mock://{index_name}"
index.save(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase ):
def _a ( self) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=lowercase_)
mocked_bulk.return_value([(True, None)] * 3)
index.add_documents(['foo', 'bar', 'foobar'])
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
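
# Outside the mocks above, the real `FaissIndex` wrapper can be exercised
# directly; a minimal sketch (requires `faiss` to be installed):
#
#     import numpy as np
#     from datasets.search import FaissIndex
#
#     index = FaissIndex()                                # flat L2 index by default
#     index.add_vectors(np.eye(5, dtype=np.float32))
#     scores, ids = index.search(np.ones(5, dtype=np.float32), k=3)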
| 676 | 0 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module ):
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False) -> None:
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed , n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit( self , hidden , weight , bias , proj) -> torch.Tensor:
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous())
            logit = nn.functional.linear(proj_hid , weight , bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
    def forward( self , hidden , labels=None , keep_order=False) -> torch.Tensor:
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.')
        else:
            hidden = hidden.view(-1 , hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1)
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj)
            head_logprob = nn.functional.log_softmax(head_logit , dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0 , indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i)
                    hidden_i = hidden.index_select(0 , indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self , 'keep_order') and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob( self , hidden) -> torch.Tensor:
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
            return nn.functional.log_softmax(logit , dim=-1)
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit , dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx , stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
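
# Usage sketch for the adaptive softmax above: frequent tokens live in the
# full-width head cluster, rarer ones in progressively narrower tail
# clusters (width d_embed // div_val**i). Shapes below are illustrative and
# parameters are left uninitialized, as in the class itself:
#
#     crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64,
#                                        cutoffs=[100, 500], div_val=2)
#     hidden = torch.randn(2, 7, 64)                   # (batch, seq, d_proj)
#     labels = torch.randint(0, 1000, (2, 7))
#     nll = crit(hidden, labels)                       # per-token NLL (inputs are shifted)
#     log_probs = crit.log_prob(hidden.view(-1, 64))   # (batch*seq, n_token)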
| 706 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset , args: Dict[str, str] ) -> None:
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )

    # load metric
    wer = load_metric('wer' )
    cer = load_metric('cer' )

    # compute metrics
    wer_result = wer.compute(references=result['target'] , predictions=result['prediction'] )
    cer_result = cer.compute(references=result['target'] , predictions=result['prediction'] )

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str )

    with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
        f.write(result_str )

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file , 'w' ) as p, open(target_file , 'w' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f"{i}" + '\n' )
                p.write(batch['prediction'] + '\n' )
                t.write(f"{i}" + '\n' )
                t.write(batch['target'] + '\n' )

            result.map(write_to_file , with_indices=True )


def normalize_text(text: str ) -> str:
    """DO NOT CHANGE. This function normalizes the target text."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '' , text.lower() )

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']

    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t ) )

    return text


def main(args ) -> None:
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio' , Audio(sampling_rate=sampling_rate ) )

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )

    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )

        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'] )
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )

    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
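
# Hypothetical command line for this evaluation script (the model and
# dataset identifiers are illustrative):
#
#     python eval.py \
#         --model_id my-org/wav2vec2-finetuned \
#         --dataset mozilla-foundation/common_voice_8_0 \
#         --config en \
#         --split test \
#         --chunk_length_s 5.0 \
#         --stride_length_s 1.0 \
#         --log_outputs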
| 676 | 0 |
import requests
lowerCAmelCase : Dict = "YOUR API KEY"
def A ( snake_case__ : str , snake_case__ : str = giphy_api_key ) -> Optional[Any]:
'''simple docstring'''
__snake_case = '+'.join(query.split() )
__snake_case = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
__snake_case = requests.get(_SCREAMING_SNAKE_CASE ).json()['data']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 707 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs ) -> None:
    """Solves the multi-process interleaved print problem."""
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
UpperCAmelCase__ : Any = torch.device("cuda", local_rank)
UpperCAmelCase__ : Union[str, Any] = socket.gethostname()
UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 676 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
UpperCAmelCase__ : Any = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig ):
    model_type = '''albert'''
def __init__( self , lowercase_=3_0_0_0_0 , lowercase_=1_2_8 , lowercase_=4_0_9_6 , lowercase_=1_2 , lowercase_=1 , lowercase_=6_4 , lowercase_=1_6_3_8_4 , lowercase_=1 , lowercase_="gelu_new" , lowercase_=0 , lowercase_=0 , lowercase_=5_1_2 , lowercase_=2 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=0.1 , lowercase_="absolute" , lowercase_=0 , lowercase_=2 , lowercase_=3 , **lowercase_ , ) -> Tuple:
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
__snake_case = vocab_size
__snake_case = embedding_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_hidden_groups
__snake_case = num_attention_heads
__snake_case = inner_group_num
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = classifier_dropout_prob
__snake_case = position_embedding_type
class AlbertOnnxConfig(OnnxConfig ):
@property
def _a ( self) -> List[str]:
if self.task == "multiple-choice":
__snake_case = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__snake_case = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
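
# Usage sketch for the config above (standard transformers API; the paths
# below are illustrative):
#
#     from transformers import AlbertConfig
#
#     config = AlbertConfig(embedding_size=128, num_hidden_groups=1)
#     config.save_pretrained('./albert-tiny')
#     config = AlbertConfig.from_pretrained('./albert-tiny')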
| 708 |
from datetime import datetime
import requests
def download_video(url: str ) -> bytes:
    """Fetch the raw bytes of the video behind an Instagram/IGTV link via downloadgram."""
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url ).json()[0]['urls'][0]['src']
    return requests.get(video_url ).content
if __name__ == "__main__":
UpperCAmelCase__ : Dict = input("Enter Video/IGTV url: ").strip()
UpperCAmelCase__ : Optional[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 0 |
'''simple docstring'''
import math
def main() -> None:
    message = input('Enter message: ' )
    key = int(input(f"Enter key [2-{len(message ) - 1}]: " ) )
    mode = input('Encryption/Decryption [e/d]: ' )

    if mode.lower().startswith('e' ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith('d' ):
        text = decrypt_message(key , message )

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}" )


def encrypt_message(key: int , message: str ) -> str:
    cipher_text = [''] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )


def decrypt_message(key: int , message: str ) -> str:
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [''] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
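
    # Round-trip sketch for the two functions above: encryption reads the
    # message column-wise (every `key`-th character per column); decryption
    # rebuilds the original grid, so the two are inverses for any message/key.
    ciphertext = encrypt_message(6, 'Common sense is not so common.')
    assert decrypt_message(6, ciphertext) == 'Common sense is not so common.'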
| 709 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def _a ( self) -> Union[str, Any]:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = OpenLlamaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
def _a ( self) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__UpperCAmelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> Tuple:
__snake_case = OpenLlamaModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self) -> int:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowercase ( OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt')]

        text_inputs = pipe.tokenizer(
            prompt, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors='np', )
        text_inputs = text_inputs['input_ids']
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs['prompt_embeds'] = prompt_embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt')]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p, padding='max_length', max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors='np', )
            text_inputs = text_inputs['input_ids']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs['prompt_embeds'], inputs['negative_prompt_embeds'] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
@property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=ddim_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'open neural network exchange'
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'open neural network exchange'
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'Andromeda galaxy in a bottle'

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt, num_inference_steps=5, guidance_scale=7.5, generator=generator, callback=test_callback_fn, callback_steps=1, )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', revision='onnx', safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
def is_automorphic_number(number: int) -> bool:
    """
    Check whether ``number`` is automorphic, i.e. whether its square ends in the
    number itself (5 -> 25, 76 -> 5776).

    >>> is_automorphic_number(5)
    True
    >>> is_automorphic_number(7)
    False
    >>> is_automorphic_number(25)
    True
    >>> is_automorphic_number(-1)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the digits of the number with the low-order digits of its square.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
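    # Extra sanity checks (added example): the classic automorphic numbers
    # below 1000 all have squares that end in themselves.
    for candidate in (0, 1, 5, 6, 25, 76, 376, 625):
        assert is_automorphic_number(candidate)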
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __lowercase ( unittest.TestCase ):
    def setUp(self):
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors='np')
        input_processor = processor(audios=raw_speech, return_tensors='np')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', )
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Elementwise logistic sigmoid: 1 / (1 + exp(-x)).

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    """
    Swish / SiLU activation: x * sigmoid(x).

    >>> swish(np.array([0.0]))
    array([0.])
    """
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
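    # Example (added): swish/SiLU on a small vector; relies only on numpy broadcasting.
    print(swish(np.array([-1.0, 0.0, 1.0])))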
def min_path_sum(grid: list) -> int:
    """
    Find the minimum cost of a path from the top-left to the bottom-right of
    ``grid``, moving only right or down.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    if not grid or not grid[0]:
        raise TypeError('The grid does not contain the appropriate information')

    # Accumulate costs along the first row, then fold each following row in.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
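    # Worked example (added): in the 3x2 grid below the cheapest path
    # 2 -> 1 -> 1 -> 2 costs 6.
    print(min_path_sum([[2, 1], [3, 1], [4, 2]]))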
def lucas_lehmer_test(p: int) -> bool:
    """
    Lucas-Lehmer primality test for the Mersenne number 2**p - 1
    (valid for odd prime exponents p; p == 2 is special-cased).

    >>> lucas_lehmer_test(7)
    True
    >>> lucas_lehmer_test(11)
    False
    """
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
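    # Added check: 2**p - 1 is a Mersenne prime for these small exponents p.
    for exponent in (3, 5, 7, 13, 17, 19, 31):
        assert lucas_lehmer_test(exponent)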
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # A list of PIL images created from random uint8 arrays.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def _a ( self) -> Optional[Any]:
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = self.get_image_processor()
__snake_case = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
processor_slow.save_pretrained(self.tmpdirname)
__snake_case = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase)
__snake_case = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
processor_fast.save_pretrained(self.tmpdirname)
__snake_case = AlignProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase)
self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase)
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase)
def _a ( self) -> Optional[int]:
__snake_case = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__snake_case = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
__snake_case = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0)
__snake_case = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCAmelCase)
def _a ( self) -> Optional[Any]:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__snake_case = self.prepare_image_inputs()
__snake_case = image_processor(_UpperCAmelCase , return_tensors='np')
__snake_case = processor(images=_UpperCAmelCase , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def _a ( self) -> List[Any]:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__snake_case = '''lower newer'''
__snake_case = processor(text=_UpperCAmelCase)
__snake_case = tokenizer(_UpperCAmelCase , padding='max_length' , max_length=6_4)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _a ( self) -> int:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__snake_case = '''lower newer'''
__snake_case = self.prepare_image_inputs()
__snake_case = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase):
processor()
def _a ( self) -> Dict:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case = processor.batch_decode(_UpperCAmelCase)
__snake_case = tokenizer.batch_decode(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
def _a ( self) -> Dict:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__snake_case = '''lower newer'''
__snake_case = self.prepare_image_inputs()
__snake_case = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
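# Usage sketch (added): because of the `_LazyModule` above, importing this package
# is cheap; e.g. `from transformers.models.clip import CLIPTokenizer` only loads
# the tokenizer submodule on first attribute access.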
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    """
    Formula for the amortization amount per month:
    A = p * r * (1 + r)**n / ((1 + r)**n - 1)
    where p is the principal, r is the rate of interest per month
    and n is the number of payments.
    """
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0')
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('Years to repay must be an integer > 0')

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
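    # Example (added): 25,000 borrowed at 12% p.a. over 3 years -> roughly 830.36 a month.
    print(round(equated_monthly_installments(25_000, 0.12, 3), 2))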
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
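if __name__ == "__main__":
    # Example (added): the defaults above reproduce the bert-generation base setup.
    config = BertGenerationConfig()
    print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)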
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm')
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages')
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias')
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias')
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias')
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection')

    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'

    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution')

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution')
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample')
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0')
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1')
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight')
            name = name.replace('upsample.0.bias', 'upsample.conv.bias')
        else:
            pass
    else:
        name = 'swin2sr.' + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Fused qkv projections are split into separate query/key/value tensors.
            key_split = key.split('.')
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ] )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]])
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]])
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]])
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]])
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]])

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print('Looks ok!')

    url_to_name = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
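# Example invocation (added sketch; the script name and output path are illustrative):
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64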
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ['note_seq']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['note_seq'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['note_seq'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['note_seq'])
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """
    Gaussian Error Linear Unit, computed exactly via the error function.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """
    Tanh-based approximation of GELU (as used in the GPT-2 codebase).
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044_715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff_a = tf.cast(0.044_715, x.dtype)
    coeff_b = tf.cast(0.7_978_845_608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_b * (1.0 + coeff_a * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """
    Clip the possible GELU outputs to [-10, 10], which is useful for quantization.
    """
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """
    Gated Linear Unit: split the input in two along ``axis`` and gate one half with the other.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
from __future__ import annotations
class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # Build a small nine-node tree to exercise the helpers above.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)
if __name__ == "__main__":
main()
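    # Extra checks (added): a lone root node is a full binary tree of depth 1.
    assert is_full_binary_tree(Node(0))
    assert depth_of_tree(Node(0)) == 1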
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """
    Jacobi Iteration Method: an iterative algorithm to determine the solutions of
    a strictly diagonally dominant system of linear equations.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError('Iterations must be at least 1')

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """
    Check whether the coefficient part of ``table`` is strictly diagonally
    dominant; raise ``ValueError`` otherwise.
    """
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
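    # Worked example (added): the strictly diagonally dominant system
    # 4x + y = 1, 2x + 3y = 2 has the solution x = 0.1, y = 0.6.
    coefficient = np.array([[4.0, 1.0], [2.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0], 25))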
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
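if __name__ == "__main__":
    # Example (added): the defaults above give a DETR-style 6+6 layer model.
    config = TableTransformerConfig()
    print(config.model_type, config.hidden_size, config.num_attention_heads)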
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """
    Calculate the builtin voltage of a pn junction:
    V_bi = (kT / q) * ln(N_d * N_a / n_i**2)
    """
    if donor_conc <= 0:
        raise ValueError('Donor concentration should be positive')
    elif acceptor_conc <= 0:
        raise ValueError('Acceptor concentration should be positive')
    elif intrinsic_conc <= 0:
        raise ValueError('Intrinsic concentration should be positive')
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            'Donor concentration should be greater than intrinsic concentration')
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            'Acceptor concentration should be greater than intrinsic concentration')
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
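    # Example (added): a silicon-like junction with concentrations in cm^-3
    # gives a builtin voltage of roughly 0.81 V.
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))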
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Return the twin prime ``number + 2`` if both ``number`` and ``number + 2``
    are prime, and -1 otherwise.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
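    # Examples (added): 5 and 7 are twin primes; 9 is not prime at all.
    print(twin_prime(5))  # 7
    print(twin_prime(9))  # -1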
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ : Any = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith('deeplabv3_'):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = 'pascal-voc-id2label.json'
    else:
        config.num_labels = 1000
        filename = 'imagenet-1k-id2label.json'

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace('conv_1.', 'conv_stem.')
    if ".block." in name:
        name = name.replace('.block.', '.')
    if "exp_1x1" in name:
        name = name.replace('exp_1x1', 'expand_1x1')
    if "red_1x1" in name:
        name = name.replace('red_1x1', 'reduce_1x1')
    if ".local_rep.conv_3x3." in name:
        name = name.replace('.local_rep.conv_3x3.', '.conv_kxk.')
    if ".local_rep.conv_1x1." in name:
        name = name.replace('.local_rep.conv_1x1.', '.conv_1x1.')
    if ".norm." in name:
        name = name.replace('.norm.', '.normalization.')
    if ".conv." in name:
        name = name.replace('.conv.', '.convolution.')
    if ".conv_proj." in name:
        name = name.replace('.conv_proj.', '.conv_projection.')

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace('expand_1x1', 'downsampling_layer.expand_1x1')
                if "conv_3x3" in name:
                    name = name.replace('conv_3x3', 'downsampling_layer.conv_3x3')
                if "reduce_1x1" in name:
                    name = name.replace('reduce_1x1', 'downsampling_layer.reduce_1x1')

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", '.layernorm.weight')
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", '.layernorm.bias')

    if ".global_rep." in name:
        name = name.replace('.global_rep.', '.transformer.')
    if ".pre_norm_mha.0." in name:
        name = name.replace('.pre_norm_mha.0.', '.layernorm_before.')
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('.pre_norm_mha.1.out_proj.', '.attention.output.dense.')
    if ".pre_norm_ffn.0." in name:
        name = name.replace('.pre_norm_ffn.0.', '.layernorm_after.')
    if ".pre_norm_ffn.1." in name:
        name = name.replace('.pre_norm_ffn.1.', '.intermediate.dense.')
    if ".pre_norm_ffn.4." in name:
        name = name.replace('.pre_norm_ffn.4.', '.output.dense.')
    if ".transformer." in name:
        name = name.replace('.transformer.', '.transformer.layer.')

    if ".aspp_layer." in name:
        name = name.replace('.aspp_layer.', '.')
    if ".aspp_pool." in name:
        name = name.replace('.aspp_pool.', '.')
    if "seg_head." in name:
        name = name.replace('seg_head.', 'segmentation_head.')
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('segmentation_head.classifier.classifier.', 'segmentation_head.classifier.')

    if "classifier.fc." in name:
        name = name.replace('classifier.fc.', 'classifier.')
    elif (not base_model) and ("segmentation_head." not in name):
        name = 'mobilevit.' + name

    return name
def convert_state_dict( orig_state_dict : dict , model , base_model : bool=False ) -> dict:
    '''simple docstring'''
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevit.'
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + 'query.weight'] = val[:dim, :]
                orig_state_dict[prefix + 'key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[prefix + 'value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[prefix + 'query.bias'] = val[:dim]
                orig_state_dict[prefix + 'key.bias'] = val[dim : dim * 2]
                orig_state_dict[prefix + 'value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val
return orig_state_dict
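# The qkv branch above assumes the checkpoint stores query/key/value as one fused tensor:
# a weight of shape (3 * all_head_size, hidden_size) is sliced into thirds
# (val[:dim], val[dim : dim * 2], val[-dim:]) for query, key and value, and a bias of
# shape (3 * all_head_size,) is sliced the same way.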
def prepare_img( ) -> Image.Image:
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_movilevit_checkpoint( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ) -> None:
    '''simple docstring'''
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='cpu' )
    # load 🤗 model
    if mobilevit_name.startswith('deeplabv3_' ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    logits = outputs.logits
    if mobilevit_name.startswith('deeplabv3_' ):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
                    [[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
                    [[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
                    [[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
                    [[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
                ] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
                    [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
                    [[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
                ] )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1e-4 )
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            'mobilevit_s': 'mobilevit-small',
            'mobilevit_xs': 'mobilevit-x-small',
            'mobilevit_xxs': 'mobilevit-xx-small',
            'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
            'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
            'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
        }
        print('Pushing to the hub...' )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization='apple' )
        model.push_to_hub(model_name , organization='apple' )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 720 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url( repo_id , path , revision ) -> None:
    '''simple docstring'''
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
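# For illustration (values taken from the parametrize decorators above): with
# repo_id="org-name/dataset-name", path="filename with blanks.csv" and revision=None,
# the expected URL is
# "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv",
# since quote() percent-encodes the blank in the filename.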
| 676 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
@dataclass
class __lowercase :
__UpperCAmelCase = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
__UpperCAmelCase = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
__UpperCAmelCase = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _a ( self) -> Optional[int]:
__snake_case = self.task_name.lower()
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = """train"""
__UpperCAmelCase = """dev"""
__UpperCAmelCase = """test"""
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = 42
__UpperCAmelCase = 42
__UpperCAmelCase = 42
def __init__( self , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = Split.train , lowercase_ = None , ) -> int:
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , FutureWarning , )
__snake_case = args
__snake_case = glue_processors[args.task_name]()
__snake_case = glue_output_modes[args.task_name]
if isinstance(lowercase_ , lowercase_):
try:
__snake_case = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
__snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}" , )
__snake_case = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case = cached_features_file + '.lock'
with FileLock(lowercase_):
if os.path.exists(lowercase_) and not args.overwrite_cache:
__snake_case = time.time()
__snake_case = torch.load(lowercase_)
logger.info(
F"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start)
else:
logger.info(F"Creating features from dataset file at {args.data_dir}")
if mode == Split.dev:
__snake_case = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
__snake_case = self.processor.get_test_examples(args.data_dir)
else:
__snake_case = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
__snake_case = examples[:limit_length]
__snake_case = glue_convert_examples_to_features(
lowercase_ , lowercase_ , max_length=args.max_seq_length , label_list=lowercase_ , output_mode=self.output_mode , )
__snake_case = time.time()
torch.save(self.features , lowercase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
def __len__( self) -> Dict:
return len(self.features)
    def __getitem__( self , i) -> InputFeatures:
        return self.features[i]
def _a ( self) -> int:
return self.label_list
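# Illustrative usage (a sketch only; the original names GlueDataTrainingArguments,
# GlueDataset and Split are assumed, since the definitions above are stored under
# placeholder identifiers):
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="glue_data/MRPC")
#   dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.dev)
# builds the dev-split InputFeatures once and caches them next to the data files.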
| 721 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key( k : str ) -> str:
    '''simple docstring'''
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name , hf_name )
    return k
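# Worked example (TF key assumed for illustration): "encoder/layer_0/ffn/dense/kernel"
# -> "encoder.layer_0.ffn.dense.kernel" ("/" -> "."), -> "encoder.layers.0.ffn.dense.kernel"
# ("r.layer_" -> "r.layers."), -> "encoder.layers.0.fc1.weight" ("ffn.dense." -> "fc1."
# and "kernel" -> "weight").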
def convert_pegasus( tf_weights : dict , cfg_updates : dict ) -> PegasusForConditionalGeneration:
    '''simple docstring'''
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v , dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping['shared.weight'][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping['encoder.embed_tokens.weight'] = mapping['shared.weight']
    mapping['decoder.embed_tokens.weight'] = mapping['shared.weight']
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing , extra = torch_model.model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy( path : str="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
    '''simple docstring'''
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['Adafactor', 'global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch( ckpt_path : str , save_dir : str ) -> None:
    '''simple docstring'''
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates['task_specific_params'] = task_specific_params
    torch_model = convert_pegasus(tf_weights , cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd , Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
UpperCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ : int = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase__ : str = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 676 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
UpperCAmelCase__ : Dict = """\
 @inproceedings{kakwani2020indicnlpsuite,
    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
    year={2020},
    booktitle={Findings of EMNLP},
}
"""
UpperCAmelCase__ : Optional[int] = """\
    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
    variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
UpperCAmelCase__ : List[str] = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
    predictions: list of predictions to score (as int64),
        except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
    references: list of ground truth labels corresponding to the predictions (as int64),
        except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "precision": Precision@10
Examples:

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli')  # 'wnli' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'precision@10': 1.0}

"""
def simple_accuracy( preds , labels ) -> float:
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_fa( preds , labels ) -> dict:
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
return {
"accuracy": acc,
"f1": fa,
}
def precision_at_aa( en_sentvecs , in_sentvecs ) -> float:
    '''simple docstring'''
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , 'cosine' )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
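# Shape sketch for precision@10 above: with n sentence vectors per language, `sim` is the
# (n, n) matrix of cosine distances between the mean-centered English and Indic vectors,
# `preds` keeps the 10 nearest Indic candidates per English row, and the score is the
# fraction of rows whose own index (the aligned translation) appears among those 10.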
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
def _a ( self) -> Union[str, Any]:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
def _a ( self , lowercase_ , lowercase_) -> List[str]:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(lowercase_ , lowercase_)}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(lowercase_ , lowercase_)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowercase_ , lowercase_)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
| 700 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
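# Rough usage sketch (checkpoint ids and the audio array are placeholders, nothing here
# loads them): instantiate the pipeline with the registered modules, then call it with
# raw speech, e.g. `pipe(audio, sampling_rate=16_000).images[0]`; Whisper first
# transcribes the audio and the transcription is then used as the diffusion prompt.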
| 676 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot( lst ):
    '''simple docstring'''
    return choice(lst )
def kth_number( lst : list[int] , k : int ) -> int:
    '''simple docstring'''
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
    if len(small ) == k - 1:
return pivot
# pivot is in elements bigger than k
    elif len(small ) < k - 1:
        return kth_number(big , k - len(small ) - 1 )
# pivot is in elements smaller than k
else:
        return kth_number(small , k )
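# Example (assumptions: k is 1-indexed and the elements are distinct, because values
# equal to the pivot are dropped by the partition above):
#   kth_number([2, 1, 3, 4, 5], 3) -> 3, the third-smallest element.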
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( lowerCamelCase__ ):
def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple:
super().__init__(*lowercase_ , **lowercase_)
__snake_case = eval_examples
__snake_case = post_process_function
def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]:
__snake_case = gen_kwargs.copy()
__snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
__snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
__snake_case = gen_kwargs
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(lowercase_)
__snake_case = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_)
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
else:
__snake_case = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_)
return metrics
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]:
__snake_case = gen_kwargs.copy()
__snake_case = self.get_test_dataloader(lowercase_)
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict')
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
| 676 | 0 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset( ) -> Dataset:
    '''simple docstring'''
    data = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    dataset = Dataset.from_dict(data )
return dataset
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> List[str]:
__snake_case = get_dataset()
__snake_case = make_duplicate_clusters(lowercase_ , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def _a ( self) -> List[str]:
__snake_case = get_dataset()
__snake_case , __snake_case = deduplicate_dataset(lowercase_)
self.assertEqual(len(lowercase_) , 2)
print(lowercase_)
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2)
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , lowercase_)
| 702 |
from __future__ import annotations
UpperCAmelCase__ : Dict = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search( grid : list[list[int]] , init : list[int] , goal : list[int] , cost : int , heuristic : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
    '''simple docstring'''
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ] # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ] # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y] # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False # flag that is set when search is complete
    resign = False # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError('Algorithm is unable to find solution' )
        else: # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ): # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] ) # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
UpperCAmelCase__ : str = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCAmelCase__ : int = [0, 0]
# all coordinates are given in format [y,x]
UpperCAmelCase__ : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCAmelCase__ : Optional[Any] = 1
# the cost map which pushes the path closer to the goal
UpperCAmelCase__ : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCAmelCase__ : Tuple = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCAmelCase__ : Optional[int] = 99
UpperCAmelCase__ , UpperCAmelCase__ : str = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 676 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''van'''
def __init__( self , lowercase_=2_2_4 , lowercase_=3 , lowercase_=[7, 3, 3, 3] , lowercase_=[4, 2, 2, 2] , lowercase_=[6_4, 1_2_8, 3_2_0, 5_1_2] , lowercase_=[3, 3, 1_2, 3] , lowercase_=[8, 8, 4, 4] , lowercase_="gelu" , lowercase_=0.02 , lowercase_=1e-6 , lowercase_=1e-2 , lowercase_=0.0 , lowercase_=0.0 , **lowercase_ , ) -> Union[str, Any]:
super().__init__(**lowercase_)
__snake_case = image_size
__snake_case = num_channels
__snake_case = patch_sizes
__snake_case = strides
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = mlp_ratios
__snake_case = hidden_act
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = layer_scale_init_value
__snake_case = drop_path_rate
__snake_case = dropout_rate
| 703 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
| 676 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase__ : Optional[int] = logging.getLogger()
UpperCAmelCase__ : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowercase ( lowerCamelCase__ ):
def _a ( self , lowercase_) -> Tuple:
os.makedirs(lowercase_ , exist_ok=lowercase_)
__snake_case = {'source': 'What is love ?', 'target': 'life'}
__snake_case = {'train': 1_2, 'val': 2, 'test': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
__snake_case = '\n'.join([contents[field]] * n_lines[split])
with open(os.path.join(lowercase_ , F"{split}.{field}") , 'w') as f:
f.write(lowercase_)
def _a ( self , lowercase_ , lowercase_ = "pytorch") -> Dict:
__snake_case = self.get_auto_remove_tmp_dir()
__snake_case = os.path.join(lowercase_ , 'output')
__snake_case = os.path.join(lowercase_ , 'data')
self._create_dummy_data(data_dir=lowercase_)
        __snake_case = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        """.split()
if gpus > 0:
testargs.append(F"--gpus={gpus}")
if is_apex_available():
testargs.append('--fp16')
else:
testargs.append('--gpus=0')
testargs.append('--distributed_backend=ddp_cpu')
testargs.append('--num_processes=2')
__snake_case = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
execute_subprocess_async(lowercase_ , env=self.get_env())
__snake_case = os.path.join(lowercase_ , 'metrics.json')
with open(lowercase_) as f:
__snake_case = json.load(lowercase_)
return result
@require_torch_gpu
def _a ( self) -> Optional[Any]:
__snake_case = self._run_finetune(gpus=1)
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2)
@require_torch_multi_gpu
def _a ( self) -> Tuple:
__snake_case = self._run_finetune(gpus=2)
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2)
@require_torch_gpu
@require_ray
def _a ( self) -> Union[str, Any]:
__snake_case = self._run_finetune(gpus=1 , distributed_retriever='ray')
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2)
@require_torch_multi_gpu
@require_ray
def _a ( self) -> Dict:
__snake_case = self._run_finetune(gpus=1 , distributed_retriever='ray')
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2)
| 704 |
def combination_sum_iv( n : int , array : list[int] , target : int ) -> int:
'''simple docstring'''
    def count_of_possible_combinations(target : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n : int , array : list[int] , target : int ) -> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
        target : int , dp_array : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n : int , array : list[int] , target : int ) -> int:
'''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
for i in range(1 , target + 1 ):
        for j in range(n ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
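# The bottom-up table above implements dp_array[i] = sum(dp_array[i - a] for a in array
# if a <= i) with dp_array[0] = 1, counting ordered sequences that sum to i. For the
# demo below (array [1, 2, 5], target 5) the table is [1, 1, 2, 3, 5, 9], so the
# answer printed is 9.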
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : str = 3
UpperCAmelCase__ : Optional[int] = 5
UpperCAmelCase__ : Tuple = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 676 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''trocr'''
__UpperCAmelCase = ['''past_key_values''']
__UpperCAmelCase = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , lowercase_=5_0_2_6_5 , lowercase_=1_0_2_4 , lowercase_=1_2 , lowercase_=1_6 , lowercase_=4_0_9_6 , lowercase_="gelu" , lowercase_=5_1_2 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=2 , lowercase_=0.02 , lowercase_=0.0 , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_=1 , lowercase_=0 , lowercase_=2 , **lowercase_ , ) -> int:
__snake_case = vocab_size
__snake_case = d_model
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = activation_function
__snake_case = max_position_embeddings
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = init_std
__snake_case = decoder_layerdrop
__snake_case = use_cache
__snake_case = scale_embedding
__snake_case = use_learned_position_embeddings
__snake_case = layernorm_embedding
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
| 705 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> List[str]:
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=True , keep_in_memory=True)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0)
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def test_serialization_fs( mockfs ) -> None:
    '''simple docstring'''
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.floataa ) )
    index_name = 'index.faiss'
    path = f"mock://{index_name}"
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.floataa )
    query[1] = 1
    scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowercase ( lowerCamelCase__ ):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(['foo', 'bar', 'foobar'])

            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
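# Illustrative helper (an assumption, not library code): how a response of the
# shape mocked above maps onto the (scores, indices) pairs the assertions use.
def _sketch_unpack_es_hits(response):
    hits = response['hits']['hits']
    scores = [hit['_score'] for hit in hits]
    indices = [hit['_id'] for hit in hits]
    return scores, indices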
| 676 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = ['''image_processor''']
__UpperCAmelCase = '''SamImageProcessor'''
    def __init__(self, image_processor) -> None:
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size['longest_edge']
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ) -> BatchEncoding:
__snake_case = self.image_processor(
lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # pop arguments that are not used in the forward pass but are used nevertheless
__snake_case = encoding_image_processor['original_sizes']
if hasattr(lowercase_ , 'numpy'): # Checks if Torch or TF tensor
__snake_case = original_sizes.numpy()
__snake_case , __snake_case , __snake_case = self._check_and_preprocess_points(
input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
__snake_case = self._normalize_and_convert(
lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
return encoding_image_processor
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="pt" , ) -> Union[str, Any]:
if input_points is not None:
if len(lowercase_) != len(lowercase_):
__snake_case = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0]) for point in input_points
]
else:
__snake_case = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_)
for point, original_size in zip(lowercase_ , lowercase_)
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points):
if input_labels is not None:
__snake_case , __snake_case = self._pad_points_and_labels(lowercase_ , lowercase_)
__snake_case = np.array(lowercase_)
if input_labels is not None:
__snake_case = np.array(lowercase_)
if input_boxes is not None:
if len(lowercase_) != len(lowercase_):
__snake_case = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_)
for box in input_boxes
]
else:
__snake_case = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_)
for box, original_size in zip(lowercase_ , lowercase_)
]
__snake_case = np.array(lowercase_)
if input_boxes is not None:
if return_tensors == "pt":
__snake_case = torch.from_numpy(lowercase_)
# boxes batch size of 1 by default
__snake_case = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
elif return_tensors == "tf":
__snake_case = tf.convert_to_tensor(lowercase_)
# boxes batch size of 1 by default
__snake_case = tf.expand_dims(lowercase_ , 1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes})
if input_points is not None:
if return_tensors == "pt":
__snake_case = torch.from_numpy(lowercase_)
# point batch size of 1 by default
__snake_case = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
elif return_tensors == "tf":
__snake_case = tf.convert_to_tensor(lowercase_)
# point batch size of 1 by default
__snake_case = tf.expand_dims(lowercase_ , 1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({'input_points': input_points})
if input_labels is not None:
if return_tensors == "pt":
__snake_case = torch.from_numpy(lowercase_)
# point batch size of 1 by default
__snake_case = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
elif return_tensors == "tf":
__snake_case = tf.convert_to_tensor(lowercase_)
# point batch size of 1 by default
__snake_case = tf.expand_dims(lowercase_ , 1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels})
return encoding_image_processor
def _a ( self , lowercase_ , lowercase_) -> Any:
__snake_case = max([point.shape[0] for point in input_points])
__snake_case = []
for i, point in enumerate(lowercase_):
if point.shape[0] != expected_nb_points:
__snake_case = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
__snake_case = np.append(input_labels[i] , [self.point_pad_value])
processed_input_points.append(lowercase_)
__snake_case = processed_input_points
return input_points, input_labels
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False) -> np.ndarray:
__snake_case , __snake_case = original_size
__snake_case , __snake_case = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_)
__snake_case = deepcopy(lowercase_).astype(lowercase_)
if is_bounding_box:
__snake_case = coords.reshape(-1 , 2 , 2)
__snake_case = coords[..., 0] * (new_w / old_w)
__snake_case = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__snake_case = coords.reshape(-1 , 4)
return coords
def _a ( self , lowercase_=None , lowercase_=None , lowercase_=None , ) -> Union[str, Any]:
if input_points is not None:
if hasattr(lowercase_ , 'numpy'): # Checks for TF or Torch tensor
__snake_case = input_points.numpy().tolist()
if not isinstance(lowercase_ , lowercase_) or not isinstance(input_points[0] , lowercase_):
raise ValueError('Input points must be a list of list of floating points.')
__snake_case = [np.array(lowercase_) for input_point in input_points]
else:
__snake_case = None
if input_labels is not None:
if hasattr(lowercase_ , 'numpy'):
__snake_case = input_labels.numpy().tolist()
if not isinstance(lowercase_ , lowercase_) or not isinstance(input_labels[0] , lowercase_):
                raise ValueError('Input labels must be a list of list of integers.')
__snake_case = [np.array(lowercase_) for label in input_labels]
else:
__snake_case = None
if input_boxes is not None:
if hasattr(lowercase_ , 'numpy'):
__snake_case = input_boxes.numpy().tolist()
if (
not isinstance(lowercase_ , lowercase_)
or not isinstance(input_boxes[0] , lowercase_)
or not isinstance(input_boxes[0][0] , lowercase_)
):
raise ValueError('Input boxes must be a list of list of list of floating points.')
__snake_case = [np.array(lowercase_).astype(np.floataa) for box in input_boxes]
else:
__snake_case = None
return input_points, input_labels, input_boxes
@property
def _a ( self) -> Optional[int]:
__snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase_))
def _a ( self , *lowercase_ , **lowercase_) -> Union[str, Any]:
return self.image_processor.post_process_masks(*lowercase_ , **lowercase_)
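# Standalone sketch of the rescaling `_normalize_coordinates` performs above: map
# (x, y) points from the original resolution to the resized resolution whose longest
# edge equals `longest_edge`. The helper name is illustrative; the +0.5 rounding
# mirrors the image processor's `_get_preprocess_shape`.
def _sketch_rescale_points(points, original_hw, longest_edge=1024):
    import numpy as np

    old_h, old_w = original_hw
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    coords = np.asarray(points, dtype=np.float64).copy()
    coords[..., 0] = coords[..., 0] * (new_w / old_w)  # x axis
    coords[..., 1] = coords[..., 1] * (new_h / old_h)  # y axis
    return coords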
| 706 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Log evaluation results, and optionally write predictions and targets to text files."""
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])

    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')

    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", 'w') as f:
        f.write(result_str)

    # log all results in a text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f"{i}" + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize a transcription the same way the training targets were normalized."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']

    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
| 676 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = CycleDiffusionPipeline
__UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
__UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
__UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
__UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a ( self) -> Tuple:
torch.manual_seed(0)
__snake_case = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
__snake_case = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1_0_0_0 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0)
__snake_case = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0)
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__snake_case = CLIPTextModel(lowercase_)
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
__snake_case = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _a ( self , lowercase_ , lowercase_=0) -> List[Any]:
__snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_)).to(lowercase_)
__snake_case = image / 2 + 0.5
if str(lowercase_).startswith('mps'):
__snake_case = torch.manual_seed(lowercase_)
else:
__snake_case = torch.Generator(device=lowercase_).manual_seed(lowercase_)
__snake_case = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def _a ( self) -> Tuple:
__snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = CycleDiffusionPipeline(**lowercase_)
__snake_case = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
__snake_case = self.get_dummy_inputs(lowercase_)
__snake_case = pipe(**lowercase_)
__snake_case = output.images
__snake_case = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
__snake_case = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU')
def _a ( self) -> Dict:
__snake_case = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase_ , 'half'):
__snake_case = module.half()
__snake_case = CycleDiffusionPipeline(**lowercase_)
__snake_case = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
__snake_case = self.get_dummy_inputs(lowercase_)
__snake_case = pipe(**lowercase_)
__snake_case = output.images
__snake_case = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
__snake_case = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
def _a ( self) -> Any:
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline')
def _a ( self) -> Optional[int]:
return super().test_inference_batch_single_identical()
@skip_mps
def _a ( self) -> int:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a ( self) -> Optional[int]:
return super().test_save_load_optional_components()
@skip_mps
def _a ( self) -> Any:
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def _a ( self) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self) -> str:
__snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png')
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy')
__snake_case = init_image.resize((5_1_2, 5_1_2))
__snake_case = 'CompVis/stable-diffusion-v1-4'
__snake_case = DDIMScheduler.from_pretrained(lowercase_ , subfolder='scheduler')
__snake_case = CycleDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , torch_dtype=torch.floataa , revision='fp16')
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
__snake_case = 'A black colored car'
__snake_case = 'A blue colored car'
__snake_case = torch.manual_seed(0)
__snake_case = pipe(
prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='np' , )
__snake_case = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image).max() < 5e-1
def _a ( self) -> Union[str, Any]:
__snake_case = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png')
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy')
__snake_case = init_image.resize((5_1_2, 5_1_2))
__snake_case = 'CompVis/stable-diffusion-v1-4'
__snake_case = DDIMScheduler.from_pretrained(lowercase_ , subfolder='scheduler')
__snake_case = CycleDiffusionPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
__snake_case = 'A black colored car'
__snake_case = 'A blue colored car'
__snake_case = torch.manual_seed(0)
__snake_case = pipe(
prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='np' , )
__snake_case = output.images
assert np.abs(image - expected_image).max() < 2e-2
| 707 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 676 | 0 |
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value), or its derivative s * (1 - s) when `deriv` is True
    (in that case `value` must already be a sigmoid output)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
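# Quick numeric sanity check (illustrative, not part of the original script): the
# derivative form above, s * (1 - s), matches a central finite-difference estimate
# of d/dx sigmoid(x).
def _sketch_check_sigmoid_derivative(x: float = 0.3, eps: float = 1e-6) -> None:
    s = sigmoid_function(x)
    analytic = sigmoid_function(s, deriv=True)  # s * (1 - s)
    numeric = (sigmoid_function(x + eps) - sigmoid_function(x - eps)) / (2 * eps)
    assert abs(analytic - numeric) < 1e-6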
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight by repeated forward/backward passes; return the final prediction scaled to 0-100."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
| 708 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video source behind an Instagram/IGTV url and return its bytes."""
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 0 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = '''esm'''
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1_0_2_6 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_="absolute" , lowercase_=True , lowercase_=None , lowercase_=False , lowercase_=False , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Tuple:
super().__init__(pad_token_id=lowercase_ , mask_token_id=lowercase_ , **lowercase_)
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = emb_layer_norm_before
__snake_case = token_dropout
__snake_case = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.')
__snake_case = EsmFoldConfig()
elif isinstance(lowercase_ , lowercase_):
__snake_case = EsmFoldConfig(**lowercase_)
__snake_case = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
__snake_case = get_default_vocab_list()
else:
__snake_case = vocab_list
else:
__snake_case = None
__snake_case = None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , lowercase_):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')
def _a ( self) -> List[Any]:
__snake_case = super().to_dict()
if isinstance(self.esmfold_config , lowercase_):
__snake_case = self.esmfold_config.to_dict()
return output
@dataclass
class __lowercase :
__UpperCAmelCase = None
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = 0
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = 128
__UpperCAmelCase = None
def _a ( self) -> int:
if self.trunk is None:
__snake_case = TrunkConfig()
elif isinstance(self.trunk , lowercase_):
__snake_case = TrunkConfig(**self.trunk)
def _a ( self) -> Optional[Any]:
__snake_case = asdict(self)
__snake_case = self.trunk.to_dict()
return output
@dataclass
class __lowercase :
__UpperCAmelCase = 48
__UpperCAmelCase = 1_024
__UpperCAmelCase = 128
__UpperCAmelCase = 32
__UpperCAmelCase = 32
__UpperCAmelCase = 32
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = False
__UpperCAmelCase = 4
__UpperCAmelCase = 128
__UpperCAmelCase = None
def _a ( self) -> int:
if self.structure_module is None:
__snake_case = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase_):
__snake_case = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                F" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}.")
__snake_case = self.sequence_state_dim // self.sequence_head_width
__snake_case = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}.")
def _a ( self) -> str:
__snake_case = asdict(self)
__snake_case = self.structure_module.to_dict()
return output
@dataclass
class __lowercase :
__UpperCAmelCase = 384
__UpperCAmelCase = 128
__UpperCAmelCase = 16
__UpperCAmelCase = 128
__UpperCAmelCase = 12
__UpperCAmelCase = 4
__UpperCAmelCase = 8
__UpperCAmelCase = 0.1
__UpperCAmelCase = 8
__UpperCAmelCase = 1
__UpperCAmelCase = 2
__UpperCAmelCase = 7
__UpperCAmelCase = 10
__UpperCAmelCase = 1e-8
__UpperCAmelCase = 1e5
def _a ( self) -> Dict:
return asdict(self)
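# Illustrative usage of the config defined above, via the public `transformers`
# API (names assumed from the upstream library): `to_dict` flattens the nested
# dataclasses back into plain dictionaries suitable for serialization.
def _sketch_esm_config_to_dict():
    from transformers import EsmConfig

    config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6)
    as_dict = config.to_dict()
    assert as_dict['hidden_size'] == 320 and as_dict['model_type'] == 'esm'
    return as_dict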
def A ( ) -> Any:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 709 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowercase :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def _a ( self) -> Union[str, Any]:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = OpenLlamaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> Tuple:
__snake_case = OpenLlamaModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self) -> int:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'single_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'multi_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _a ( self) -> List[Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def _a ( self , lowercase_) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = ids_tensor([1, 1_0] , config.vocab_size)
__snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = OpenLlamaModel(lowercase_)
original_model.to(lowercase_)
original_model.eval()
__snake_case = original_model(lowercase_).last_hidden_state
__snake_case = original_model(lowercase_).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = {'type': scaling_type, 'factor': 10.0}
__snake_case = OpenLlamaModel(lowercase_)
scaled_model.to(lowercase_)
scaled_model.eval()
__snake_case = scaled_model(lowercase_).last_hidden_state
__snake_case = scaled_model(lowercase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
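# Illustrative sketch of what the parameterized test above exercises: RoPE scaling
# is just a dict on the model config, with `factor` stretching the usable context.
# Field names follow the Llama family; treat this as an assumption, not a spec.
def _sketch_rope_scaling_config():
    from transformers import OpenLlamaConfig

    config = OpenLlamaConfig(rope_scaling={'type': 'linear', 'factor': 2.0})
    assert config.rope_scaling['factor'] == 2.0
    return config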
| 676 | 0 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate each process's turnaround time under Highest Response Ratio Next scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks finished processes: 0 while a process is still waiting, 1 once it has run.
    finished_process = [0] * no_of_process
    # List to hold the calculated turnaround times
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first unfinished process.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index of the process selected to run next
        loc = 0
        # Holds the response ratio of the process currently being considered.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]

            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turnaround time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Mark the process as performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Waiting time is turnaround time minus burst time."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
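# The quantity that drives the scheduler above, pulled out for clarity (illustrative
# helper, not used by the functions): HRRN always runs the ready process with the
# highest ratio, so long waits eventually beat short bursts and starvation is avoided.
def _response_ratio(wait_time: float, burst: float) -> float:
    return (wait_time + burst) / burst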
if __name__ == "__main__":
    no_of_process = 5
    process_name = ['A', 'B', 'C', 'D', 'E']
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 710 |
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself, e.g. 5 -> 25, 76 -> 5776.

    >>> is_automorphic_number(76)
    True
    >>> is_automorphic_number(8)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
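# Usage sketch: collect the automorphic numbers below a limit; below 1000 this
# yields [0, 1, 5, 6, 25, 76, 376, 625].
def _sketch_list_automorphic(limit: int = 1000) -> list:
    return [n for n in range(limit) if is_automorphic_number(n)]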
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class __lowercase :
__UpperCAmelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCAmelCase = field(
default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__UpperCAmelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class __lowercase :
__UpperCAmelCase = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
__UpperCAmelCase = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ' --overwrite_output_dir to overcome.'
        )

    module = import_module('tasks')
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            'accuracy_score': accuracy_score(out_label_list, preds_list),
            'precision': precision_score(out_label_list, preds_list),
            'recall': recall_score(out_label_list, preds_list),
            'f1': f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, 'test_results.txt')
        if trainer.is_world_process_zero():
            with open(output_test_results_file, 'w') as writer:
                for key, value in metrics.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, 'w') as writer:
                with open(os.path.join(data_args.data_dir, 'test.txt'), 'r') as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 711 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Elementwise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)
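# Quick numeric sanity check (illustrative): SiLU is exactly 0 at 0 and approaches
# the identity for large positive inputs, since sigmoid(x) -> 1.
def _sketch_check_silu() -> None:
    xs = np.array([-10.0, 0.0, 10.0])
    ys = sigmoid_linear_unit(xs)
    assert ys[1] == 0.0
    assert abs(ys[2] - 10.0) < 1e-3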
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
__UpperCAmelCase = '''CIDAS/clipseg-rd64-refined'''
__UpperCAmelCase = '''image_segmenter'''
__UpperCAmelCase = CLIPSegForImageSegmentation
__UpperCAmelCase = ['''image''', '''text''']
__UpperCAmelCase = ['''image''']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='pt')

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
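# Illustrative usage sketch (assumes the `transformers` agents/tools API; the class
# above is named `ImageSegmentationTool` upstream, and `image_path` is a hypothetical
# input): the tool takes a PIL image plus a text label and returns a PIL mask.
def _sketch_use_image_segmenter(image_path: str, label: str):
    from PIL import Image

    tool = ImageSegmentationTool()  # the class defined above
    image = Image.open(image_path)
    return tool(image, label)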
| 712 |
def lucas_lehmer_test(p: int) -> bool:
    """Return True if 2**p - 1 is a Mersenne prime; the test is only meaningful for prime p."""
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
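# Usage sketch: keep only the prime exponents whose Mersenne number passes the test.
# Up to 31 this yields [3, 5, 7, 13, 17, 19, 31] (p=2 is the special-cased base case).
def _sketch_mersenne_exponents(max_p: int = 31) -> list:
    primes = [p for p in range(3, max_p + 1) if all(p % d for d in range(2, int(p**0.5) + 1))]
    return [p for p in primes if lucas_lehmer_test(p)]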
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
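# For context, a simplified sketch (not the actual `_LazyModule` implementation)
# of the lazy pattern above, using module-level __getattr__ from PEP 562:
#
# import importlib
#
# _attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}
#
# def __getattr__(name):
#     # Import the submodule only when one of its attributes is first accessed.
#     if name in _attr_to_module:
#         module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")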
| 713 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
def solution(n: int = 1000) -> int:
    '''Count the first n expansions of the continued fraction of sqrt(2) whose
    numerator has more digits than the denominator (Project Euler problem 57).'''
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='Bit does not output attentions')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='Bit does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='Bit does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason='Bit does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
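# The tester pattern above in miniature (a hypothetical sketch, not part of the
# test suite): the tester object holds the configuration while the
# unittest.TestCase "parent" supplies the assertion methods.
#
# class MiniTester:
#     def __init__(self, parent, batch_size=3):
#         self.parent = parent
#         self.batch_size = batch_size
#
#     def create_and_check(self):
#         self.parent.assertEqual(len(list(range(self.batch_size))), self.batch_size)
#
# class MiniTest(unittest.TestCase):
#     def test_tester_pattern(self):
#         MiniTester(self).create_and_check()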
| 715 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ''

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.patch_embeddings.layernorm')
    if "layers" in name:
        name = name.replace('layers', 'encoder.stages')
    if "residual_group.blocks" in name:
        name = name.replace('residual_group.blocks', 'layers')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if "q_bias" in name:
        name = name.replace('q_bias', 'query.bias')
    if "k_bias" in name:
        name = name.replace('k_bias', 'key.bias')
    if "v_bias" in name:
        name = name.replace('v_bias', 'value.bias')
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp', 'continuous_position_bias_mlp')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'patch_embed.projection')

    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'

    if "conv_first" in name:
        name = name.replace('conv_first', 'first_convolution')

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace('conv_last', 'final_convolution')
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace('conv_before_upsample.0', 'conv_before_upsample')
            if "upsample.0" in name:
                name = name.replace('upsample.0', 'upsample.convolution_0')
            if "upsample.2" in name:
                name = name.replace('upsample.2', 'upsample.convolution_1')
            name = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace('upsample.0.weight', 'upsample.conv.weight')
            name = name.replace('upsample.0.bias', 'upsample.conv.bias')
        else:
            pass
    else:
        name = 'swin2sr.' + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('.')
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)

    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError('Missing keys when converting: {}'.format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if 'Jpeg' in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print('Looks ok!')
    url_to_name = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
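# Illustration (not part of the original script): how the fused qkv matrix is
# split into equal thirds above, shown with a made-up dimension.
if __name__ == "__main__":
    dim = 4
    qkv_weight = torch.randn(3 * dim, dim)  # fused [query; key; value] projection
    query, key, value = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
    assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)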
| 676 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip('/')
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin'))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError('Unknown pruning method')

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin'))
    print('\nPruned model saved! See you later!')
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
UpperCAmelCase__ : List[str] = parser.parse_args()
main(args)
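# Illustration (not part of the original script): the deterministic
# hard-concrete gate used by the "l0" branch, applied to made-up scores.
# Strongly negative scores clamp to 0 (pruned), strongly positive to 1 (kept).
if __name__ == "__main__":
    demo_scores = torch.tensor([-3.0, 0.0, 3.0])
    l, r = -0.1, 1.1
    demo_mask = (torch.sigmoid(demo_scores) * (r - l) + l).clamp(min=0.0, max=1.0)
    print(demo_mask)  # approx [0.0, 0.5, 1.0]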
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    '''Return the number of possible shards according to the input gen_kwargs.'''
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                'Sharding is ambiguous for this dataset: '
                + 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
                + '\n'.join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
                + 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    '''Distribute num_shards shards as evenly as possible over max_num_jobs groups of contiguous ranges.'''
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    '''Split the gen_kwargs into as many shard-specific gen_kwargs as there are jobs.'''
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    '''Merge shard-specific gen_kwargs back into one dict, concatenating the lists.'''
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    '''Shuffle the lists in gen_kwargs; lists of the same size get the same shuffling so entangled lists stay aligned.'''
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
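# Illustration (not part of the original module): distributing 10 shards over
# 3 jobs yields contiguous groups of sizes 4, 3 and 3.
if __name__ == "__main__":
    print(_distribute_shards(num_shards=10, max_num_jobs=3))
    # [range(0, 4), range(4, 7), range(7, 10)]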
| 717 |
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    '''A tree is full iff every node has either zero or two children.'''
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)
if __name__ == "__main__":
main()
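# Illustration (not part of the original file): a three-node tree is full,
# while a single-child tree is not.
if __name__ == "__main__":
    full = Node(1)
    full.left, full.right = Node(2), Node(3)
    print(is_full_binary_tree(full))  # True

    lopsided = Node(1)
    lopsided.left = Node(2)
    print(is_full_binary_tree(lopsided))  # False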
| 676 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCAmelCase__ : Union[str, Any] = "<<<<<<< This should probably be modified because it mentions: "
UpperCAmelCase__ : Tuple = "=======\n>>>>>>>\n"
UpperCAmelCase__ : Tuple = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"tfds\.core", r"datasets"),
(r"tf\.io\.gfile\.GFile", r"open"),
(r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
(r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
(r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
(r"tfds\.features\.FeaturesDict\(", r"dict("),
(r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(r"tfds\.", r"datasets."),
(r"dl_manager\.manual_dir", r"self.config.data_dir"),
(r"self\.builder_config", r"self.config"),
]
def convert_command_factory(args: Namespace):
    '''Create a ConvertCommand from the parsed CLI arguments.'''
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            'convert', help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.',
        )
        train_parser.add_argument(
            '--tfds_path', type=str, required=True, help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.',
        )
        train_parser.add_argument(
            '--datasets_directory', type=str, required=True, help='Path to the HuggingFace Datasets folder.')
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger('datasets-cli/converting')

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.')

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(F"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(F"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('Skipping file')
                continue

            with open(input_file, encoding='utf-8') as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = 'import datasets\n'
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = 'from datasets import logging\n'
                elif "getLogger" in out_line:
                    out_line = out_line.replace('getLogger', 'get_logger')
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + '\n')
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)', out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(','))
                    out_line = 'from . import ' + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('.py', '')
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(F"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, 'w', encoding='utf-8') as f:
                f.writelines(out_lines)
            self._logger.info(F"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace('.py', '')]
                self._logger.info(F"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(F"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
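# Illustration (an assumption about the surrounding library, not part of the
# original file): `attribute_map` makes `hidden_size` an alias of `d_model`.
if __name__ == "__main__":
    demo_config = TableTransformerConfig(d_model=512)
    print(demo_config.hidden_size)  # 512, proxied through attribute_map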
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    '''Return number + 2 if (number, number + 2) is a twin-prime pair, else -1.'''
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
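# Illustration (not part of the original file): the smaller members of the
# twin-prime pairs below 50.
if __name__ == "__main__":
    print([n for n in range(2, 50) if twin_prime(n) != -1])  # [3, 5, 11, 17, 29, 41]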
| 676 | 0 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    '''Render nested benchmark results from a JSON file as collapsible Markdown tables.'''
    with open(input_json_file, encoding='utf-8') as f:
        results = json.load(f)

    output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split('/')[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = '| metric |'
        lines = '|--------|'
        value = '| new / old (diff) |'
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['new']
            old_val = metric_vals.get('old', None)
            dif_val = metric_vals.get('diff', None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else 'None'
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append('</details>')

    with open(output_md_file, 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 720 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 676 | 0 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    '''Load pytorch checkpoints in a flax model.'''
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.'
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location='cpu')
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    '''Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary.'''

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        '''Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict.'''
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('mean',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('var',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + '_g'
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + '_v'
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
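# Illustration (not part of the original module): the (2, 3, 1, 0) transpose
# above maps PyTorch's (out_ch, in_ch, kH, kW) conv layout to Flax's
# (kH, kW, in_ch, out_ch).
if __name__ == "__main__":
    demo_kernel = np.zeros((8, 3, 5, 5))
    print(demo_kernel.transpose(2, 3, 1, 0).shape)  # (5, 5, 3, 8)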
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params['params']
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params['batch_stats'])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split('.')[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split('.')[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split('.'))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params['params']

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats']))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split('.')[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split('.')[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split('.'))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    '''Load flax checkpoints in a PyTorch model.'''
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, 'Flax' + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, 'rb') as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def A ( snake_case__ : List[str] , snake_case__ : str ) -> Any:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    __snake_case = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloataa , snake_case__ ) ).values()
if any(snake_case__ ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
    __snake_case = jax.tree_util.tree_map(
        lambda params: params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ )
__snake_case = flatten_dict(snake_case__ )
__snake_case = pt_model.state_dict()
__snake_case = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
__snake_case = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__snake_case = []
__snake_case = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__snake_case = flax_key_tuple[0] == pt_model.base_model_prefix
__snake_case = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__snake_case = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__snake_case = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict:
# conv layer
__snake_case = flax_key_tuple[:-1] + ('weight',)
__snake_case = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict:
# linear layer
__snake_case = flax_key_tuple[:-1] + ('weight',)
__snake_case = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__snake_case = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__snake_case = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
__snake_case = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
__snake_case = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__snake_case = '.'.join(snake_case__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__snake_case = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
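    # Worked example (added): for the key 'conv.parametrizations.weight.original0',
    # key.split('.')[-3::2] == ['parametrizations', 'original0'], so the key is exposed
    # under the pre-weight_norm name 'conv.weight_g' ('original1' maps to 'conv.weight_v').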
for key in pt_model_dict:
__snake_case = key.split('.' )
__snake_case = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__snake_case = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
__snake_case = key_components[-2] + '_v'
if name is not None:
__snake_case = key_components[:-3] + [name]
__snake_case = '.'.join(snake_case__ )
__snake_case = key
if flax_key in special_pt_names:
__snake_case = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
__snake_case = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor
__snake_case = torch.from_numpy(snake_case__ )
# remove from missing keys
missing_keys.remove(snake_case__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case__ )
pt_model.load_state_dict(snake_case__ )
# re-transform missing_keys to list
__snake_case = list(snake_case__ )
if len(snake_case__ ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
if len(snake_case__ ) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
' use it for predictions and inference.' )
else:
logger.warning(
f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
'If your task is similar to the task the model of the checkpoint was trained on, '
f"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
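# --- Illustration (added, never called): a numpy-only sketch of the two weight-layout
# conversions performed above. Flax stores conv kernels as (H, W, in, out) and dense
# kernels as (in, out); PyTorch expects (out, in, H, W) and (out, in) respectively.
def _demo_flax_to_pt_layouts() -> None:
    conv_kernel = np.zeros((3, 3, 16, 32))  # Flax conv kernel: (H, W, in, out)
    assert np.transpose(conv_kernel, (3, 2, 0, 1)).shape == (32, 16, 3, 3)
    dense_kernel = np.zeros((128, 64))  # Flax dense kernel: (in, out)
    assert dense_kernel.T.shape == (64, 128)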
| 721 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
UpperCAmelCase__ : Optional[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def A ( snake_case__ : List[Any] ) -> str:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
__snake_case = k.replace(snake_case__ , snake_case__ )
return k
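# --- Illustration (added, never called): one end-to-end rewrite through the ordered
# PATTERNS table above, using the name the converter itself calls
# (`rename_state_dict_key`). The input variable name is hypothetical.
def _demo_rename_state_dict_key() -> None:
    assert rename_state_dict_key('decoder/memory_attention/output_proj/kernel') == 'decoder.encoder_attn.out_proj.weight'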
def A ( snake_case__ : dict , snake_case__ : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
__snake_case = DEFAULTS.copy()
cfg_kwargs.update(snake_case__ )
__snake_case = PegasusConfig(**snake_case__ )
__snake_case = PegasusForConditionalGeneration(snake_case__ )
__snake_case = torch_model.model.state_dict()
__snake_case = {}
for k, v in tf_weights.items():
__snake_case = rename_state_dict_key(snake_case__ )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
__snake_case = v.T
__snake_case = torch.tensor(snake_case__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
__snake_case = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
__snake_case = mapping['shared.weight']
__snake_case = mapping['shared.weight']
__snake_case = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**snake_case__ )
__snake_case , __snake_case = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ )
__snake_case = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
__snake_case = tf.train.list_variables(snake_case__ )
__snake_case = {}
__snake_case = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
__snake_case = any(pat in name for pat in ignore_name )
if skip_key:
continue
__snake_case = tf.train.load_variable(snake_case__ , snake_case__ )
__snake_case = array
return tf_weights
def A ( snake_case__ : str , snake_case__ : str ) -> Tuple:
'''simple docstring'''
# save tokenizer first
__snake_case = Path(snake_case__ ).parent.name
__snake_case = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
__snake_case = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case__ )
# convert model
__snake_case = get_tf_weights_as_numpy(snake_case__ )
__snake_case = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
__snake_case = task_specific_params
__snake_case = convert_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
__snake_case = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
UpperCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
UpperCAmelCase__ : int = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase__ : str = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 676 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
UpperCAmelCase__ : List[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def A ( snake_case__ : int ) -> int:
'''simple docstring'''
__snake_case = {}
with open(snake_case__ , 'r' ) as file:
for line_number, line in enumerate(snake_case__ ):
__snake_case = line.strip()
if line:
__snake_case = line.split()
__snake_case = line_number
__snake_case = words[0]
__snake_case = value
return result
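# --- Illustration (added, never called): the label file parsed above holds one label per
# line; the result maps the zero-based line number to the line's first token. Uses the
# name the converter calls below (`read_txt_into_dict`); the path is hypothetical.
def _demo_read_txt_into_dict() -> None:
    path = '/tmp/_demo_labels.txt'  # hypothetical scratch path
    with open(path, 'w') as f:
        f.write('angry\nhappy\nneutral\n')
    assert read_txt_into_dict(path) == {0: 'angry', 1: 'happy', 2: 'neutral'}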
def A ( snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split('.' ):
__snake_case = getattr(snake_case__ , snake_case__ )
__snake_case = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case__ ):
__snake_case = PARAM_MAPPING[full_name.split('.' )[-1]]
__snake_case = 'param'
if weight_type is not None and weight_type != "param":
__snake_case = getattr(snake_case__ , snake_case__ ).shape
elif weight_type is not None and weight_type == "param":
__snake_case = hf_pointer
for attribute in hf_param_name.split('.' ):
__snake_case = getattr(snake_case__ , snake_case__ )
__snake_case = shape_pointer.shape
# let's reduce dimension
__snake_case = value[0]
else:
__snake_case = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__snake_case = value
elif weight_type == "weight_g":
__snake_case = value
elif weight_type == "weight_v":
__snake_case = value
elif weight_type == "bias":
__snake_case = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
__snake_case = getattr(snake_case__ , snake_case__ )
__snake_case = value
else:
__snake_case = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def A ( snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Tuple ) -> Optional[int]:
'''simple docstring'''
__snake_case = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case__ ):
__snake_case = PARAM_MAPPING[full_name.split('.' )[-1]]
__snake_case = 'param'
if weight_type is not None and weight_type != "param":
__snake_case = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__snake_case = '.'.join([key, hf_param_name] )
else:
__snake_case = key
__snake_case = value if 'lm_head' in full_key else value[0]
UpperCAmelCase__ : Tuple = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def A ( snake_case__ : Any , snake_case__ : Tuple , snake_case__ : List[str]=None , snake_case__ : str=None ) -> str:
'''simple docstring'''
__snake_case = False
for key, mapped_key in MAPPING.items():
__snake_case = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__snake_case = True
if "*" in mapped_key:
__snake_case = name.split(snake_case__ )[0].split('.' )[-2]
__snake_case = mapped_key.replace('*' , snake_case__ )
if "weight_g" in name:
__snake_case = 'weight_g'
elif "weight_v" in name:
__snake_case = 'weight_v'
elif "bias" in name:
__snake_case = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case = 'weight'
else:
__snake_case = None
if hf_dict is not None:
rename_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
return is_used
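# Worked example (added): how the '*' wildcard in MAPPING is resolved above. For a
# fairseq name like 'encoder.layers.3.self_attn.k_proj.weight' matched against the
# key 'self_attn.k_proj':
#     name.split(key)[0].split('.')[-2]  ->  '3'
# so 'encoder.layers.*.attention.k_proj' becomes 'encoder.layers.3.attention.k_proj'.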
def A ( snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[int] ) -> List[str]:
'''simple docstring'''
__snake_case = []
__snake_case = fairseq_model.state_dict()
__snake_case = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__snake_case = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == 'group' , )
__snake_case = True
else:
__snake_case = load_wavaveca_layer(snake_case__ , snake_case__ , snake_case__ )
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(f"Unused weights: {unused_weights}" )
def A ( snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Dict ) -> Any:
'''simple docstring'''
__snake_case = full_name.split('conv_layers.' )[-1]
__snake_case = name.split('.' )
__snake_case = int(items[0] )
__snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__snake_case = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__snake_case = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__snake_case = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__snake_case = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(snake_case__ )
@torch.no_grad()
def A ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : int=None , snake_case__ : Optional[Any]=None , snake_case__ : List[str]=True , snake_case__ : Optional[int]=False ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
__snake_case = WavaVecaConfig.from_pretrained(snake_case__ )
else:
__snake_case = WavaVecaConfig()
if is_seq_class:
__snake_case = read_txt_into_dict(snake_case__ )
__snake_case = idalabel
__snake_case = WavaVecaForSequenceClassification(snake_case__ )
__snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
feature_extractor.save_pretrained(snake_case__ )
elif is_finetuned:
if dict_path:
__snake_case = Dictionary.load(snake_case__ )
            # important: change the bos & pad token ids, since the CTC symbol is <pad> and
            # not <s> as in fairseq
__snake_case = target_dict.pad_index
__snake_case = target_dict.bos_index
__snake_case = target_dict.eos_index
__snake_case = len(target_dict.symbols )
__snake_case = os.path.join(snake_case__ , 'vocab.json' )
if not os.path.isdir(snake_case__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(snake_case__ ) )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
__snake_case = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case = 0
__snake_case = 1
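            # (added note: in the upstream conversion script these two lines set
            # vocab_dict['<pad>'] = 0 and vocab_dict['<s>'] = 1, undoing the swap)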
with open(snake_case__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(snake_case__ , snake_case__ )
__snake_case = WavaVecaCTCTokenizer(
snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=snake_case__ , )
__snake_case = True if config.feat_extract_norm == 'layer' else False
__snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
__snake_case = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
processor.save_pretrained(snake_case__ )
__snake_case = WavaVecaForCTC(snake_case__ )
else:
__snake_case = WavaVecaForPreTraining(snake_case__ )
if is_finetuned or is_seq_class:
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__snake_case = argparse.Namespace(task='audio_pretraining' )
__snake_case = fairseq.tasks.setup_task(snake_case__ )
__snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case__ )
__snake_case = model[0].eval()
recursively_load_weights(snake_case__ , snake_case__ , not is_finetuned )
hf_wavavec.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
UpperCAmelCase__ : Tuple = parser.parse_args()
UpperCAmelCase__ : Dict = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 700 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
        # For classifier-free guidance we need both an unconditional and a conditional
        # prediction. We concatenate the unconditional and text embeddings into a single
        # batch so the UNet only runs once per denoising step instead of twice.
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored by other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
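                # (added note: guidance_scale == 1.0 reduces to the text-conditioned
                # prediction, since uncond + 1.0 * (text - uncond) == text)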
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
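# --- Illustration (added, never called): a self-contained sketch of the signature probe
# used in __call__ above to decide whether the scheduler's `step` accepts `eta`.
def _demo_accepts_kwarg() -> None:
    def step(sample, eta=0.0):
        return sample
    assert 'eta' in set(inspect.signature(step).parameters.keys())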
| 676 | 0 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCAmelCase__ : Optional[int] = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def A ( snake_case__ : Tuple , snake_case__ : List[str] ) -> Tuple:
'''simple docstring'''
return (abs(source - target ) / target) < 0.01
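# --- Illustration (added, never called): the helper above is a 1% relative-tolerance
# check; the demo uses the name the test below already calls (`is_apercent_close`).
def _demo_is_apercent_close() -> None:
    assert is_apercent_close(2355000, 2351563)  # ~0.15% apart -> close
    assert not is_apercent_close(100, 200)  # 50% apart -> not close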
@pytest.mark.integration
def A ( snake_case__ : Optional[int] ) -> List[str]:
'''simple docstring'''
__snake_case = _TestCommandArgs(dataset=snake_case__ , all_configs=snake_case__ , save_infos=snake_case__ )
__snake_case = TestCommand(*snake_case__ )
test_command.run()
__snake_case = os.path.join(snake_case__ , 'README.md' )
assert os.path.exists(snake_case__ )
__snake_case = DatasetInfosDict.from_directory(snake_case__ )
__snake_case = DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) , splits=[
{
'name': 'train',
'num_bytes': 235_1563,
'num_examples': 1_0000,
},
{
'name': 'validation',
'num_bytes': 23_8418,
'num_examples': 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
__snake_case , __snake_case = getattr(dataset_infos['default'] , snake_case__ ), getattr(expected_dataset_infos['default'] , snake_case__ )
if key == "num_bytes":
assert is_apercent_close(snake_case__ , snake_case__ )
elif key == "splits":
assert list(snake_case__ ) == list(snake_case__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
                    assert result == expected
| 701 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowercase ( lowerCamelCase__ ):
def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple:
super().__init__(*lowercase_ , **lowercase_)
__snake_case = eval_examples
__snake_case = post_process_function
def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]:
__snake_case = gen_kwargs.copy()
__snake_case = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
__snake_case = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
__snake_case = gen_kwargs
__snake_case = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case = self.get_eval_dataloader(lowercase_)
__snake_case = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_)
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
else:
__snake_case = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(lowercase_)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_)
return metrics
def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]:
__snake_case = gen_kwargs.copy()
__snake_case = self.get_test_dataloader(lowercase_)
        # Temporarily disable metric computation; we will do it in the loop here.
__snake_case = self.compute_metrics
__snake_case = None
__snake_case = time.time()
__snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
__snake_case = compute_metrics
__snake_case = self.args.eval_batch_size * self.args.world_size
if F"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict')
__snake_case = self.compute_metrics(lowercase_)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"{metric_key_prefix}_"):
__snake_case = metrics.pop(lowercase_)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
| 676 | 0 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ : Tuple = 5
UpperCAmelCase__ : int = 10
@require_sentencepiece
@require_tokenizers
class __lowercase ( lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = SpeechaTextTokenizer
__UpperCAmelCase = False
__UpperCAmelCase = True
def _a ( self) -> Optional[Any]:
super().setUp()
__snake_case = sp.SentencePieceProcessor()
spm_model.Load(lowercase_)
__snake_case = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(lowercase_))]
__snake_case = dict(zip(lowercase_ , range(len(lowercase_))))
__snake_case = Path(self.tmpdirname)
save_json(lowercase_ , save_dir / VOCAB_FILES_NAMES['vocab_file'])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(lowercase_ , save_dir / VOCAB_FILES_NAMES['spm_file'])
__snake_case = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def _a ( self) -> Tuple:
__snake_case = '<pad>'
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_) , lowercase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_) , lowercase_)
def _a ( self) -> List[Any]:
__snake_case = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , 'j')
self.assertEqual(len(lowercase_) , 1_0_0_1)
def _a ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_1)
def _a ( self) -> Tuple:
__snake_case = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
__snake_case = tokenizer.tokenize('This is a test')
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_) , [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6] , )
__snake_case = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
__snake_case = tokenizer.convert_tokens_to_ids(lowercase_)
self.assertListEqual(lowercase_ , [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8])
__snake_case = tokenizer.convert_ids_to_tokens(lowercase_)
self.assertListEqual(
lowercase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def _a ( self) -> Tuple:
# fmt: off
__snake_case = {'input_ids': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class __lowercase ( unittest.TestCase ):
__UpperCAmelCase = '''valhalla/s2t_mustc_multilinguial_medium'''
__UpperCAmelCase = '''C\'est trop cool'''
__UpperCAmelCase = '''Esto es genial'''
@classmethod
def _a ( cls) -> Dict:
__snake_case = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
def _a ( self) -> Any:
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 1_1)
def _a ( self) -> Dict:
self.assertEqual(self.tokenizer.vocab_size , 1_0_0_0_0)
def _a ( self) -> Optional[Any]:
self.assertIn(lowercase_ , self.tokenizer.all_special_ids)
__snake_case = [ES_CODE, 4, 1_6_0_1, 4_7, 7_6_4_7, 2]
__snake_case = self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_)
__snake_case = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_)
self.assertEqual(lowercase_ , lowercase_)
self.assertNotIn(self.tokenizer.eos_token , lowercase_)
def _a ( self) -> Optional[int]:
__snake_case = 'fr'
__snake_case = self.tokenizer(self.french_text).input_ids
self.assertEqual(encoded[0] , lowercase_)
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def _a ( self) -> Optional[Any]:
__snake_case = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
__snake_case = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
| 702 |
from __future__ import annotations
UpperCAmelCase__ : Dict = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def A ( snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
'''simple docstring'''
__snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
    ] # the closed (visited) cells grid
__snake_case = 1
__snake_case = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the action grid
__snake_case = init[0]
__snake_case = init[1]
__snake_case = 0
    __snake_case = g + heuristic[x][y] # f = g + h: cost so far plus heuristic estimate to the goal
__snake_case = [[f, g, x, y]]
__snake_case = False # flag that is set when search is complete
__snake_case = False # flag set if we can't find expand
while not found and not resign:
if len(snake_case__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case = cell.pop()
__snake_case = next_cell[2]
__snake_case = next_cell[3]
__snake_case = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case = True
else:
for i in range(len(snake_case__ ) ): # to try out different valid actions
__snake_case = x + DIRECTIONS[i][0]
__snake_case = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case = g + cost
__snake_case = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case = 1
__snake_case = i
__snake_case = []
__snake_case = goal[0]
__snake_case = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case = x - DIRECTIONS[action[x][y]][0]
__snake_case = y - DIRECTIONS[action[x][y]][1]
__snake_case = xa
__snake_case = ya
invpath.append([x, y] )
__snake_case = []
for i in range(len(snake_case__ ) ):
path.append(invpath[len(snake_case__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
UpperCAmelCase__ : str = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCAmelCase__ : int = [0, 0]
# all coordinates are given in format [y,x]
UpperCAmelCase__ : int = [len(grid) - 1, len(grid[0]) - 1]
UpperCAmelCase__ : Optional[Any] = 1
# the cost map which pushes the path closer to the goal
UpperCAmelCase__ : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCAmelCase__ : Tuple = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCAmelCase__ : Optional[int] = 99
UpperCAmelCase__ , UpperCAmelCase__ : str = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 676 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase__ : Dict = get_tests_dir("fixtures")
UpperCAmelCase__ : Optional[int] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
UpperCAmelCase__ : str = get_tests_dir("fixtures/dummy-config.json")
class __lowercase ( unittest.TestCase ):
def _a ( self) -> Union[str, Any]:
__snake_case = 0
def _a ( self) -> Optional[int]:
__snake_case = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
self.assertIsInstance(lowercase_ , lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
def _a ( self) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_).to_dict()
config_dict.pop('feature_extractor_type')
__snake_case = WavaVecaFeatureExtractor(**lowercase_)
# save in new folder
model_config.save_pretrained(lowercase_)
config.save_pretrained(lowercase_)
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_)
# make sure private variable is not incorrectly saved
__snake_case = json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(lowercase_ , lowercase_)
def _a ( self) -> List[Any]:
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
def _a ( self) -> List[Any]:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier'):
__snake_case = AutoFeatureExtractor.from_pretrained('bert-base')
def _a ( self) -> Tuple:
with self.assertRaisesRegex(
lowercase_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_ , revision='aaaaaa')
def _a ( self) -> Tuple:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__snake_case = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model')
def _a ( self) -> str:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_):
__snake_case = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_):
__snake_case = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_)
__snake_case = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_)
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_ , trust_remote_code=lowercase_)
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
def _a ( self) -> Tuple:
try:
AutoConfig.register('custom' , lowercase_)
AutoFeatureExtractor.register(lowercase_ , lowercase_)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_):
AutoFeatureExtractor.register(lowercase_ , lowercase_)
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case = CustomFeatureExtractor.from_pretrained(lowercase_)
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowercase_)
__snake_case = AutoFeatureExtractor.from_pretrained(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _a ( self) -> Optional[Any]:
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = True
try:
AutoConfig.register('custom' , lowercase_)
AutoFeatureExtractor.register(lowercase_ , lowercase_)
# If remote code is not set, the default is to use local
__snake_case = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor')
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
self.assertTrue(feature_extractor.is_local)
# If remote code is disabled, we load the local one.
__snake_case = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
self.assertTrue(feature_extractor.is_local)
# If remote is enabled, we load from the Hub
__snake_case = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase_)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
self.assertTrue(not hasattr(lowercase_ , 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 703 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase__ : Any = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict:
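        # Collect the files in `directory`, keep those matching `identifier` (or drop those
        # matching `n_identifier`), then run each one through doctest.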
__snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))]
if identifier is not None:
__snake_case = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_):
for n_ in n_identifier:
__snake_case = [file for file in files if n_ not in file]
else:
__snake_case = [file for file in files if n_identifier not in file]
__snake_case = ignore_files or []
ignore_files.append('__init__.py')
__snake_case = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase_)
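            # Either import the file as a module and doctest its docstrings, or run the
            # raw file through doctest.testfile below.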
if only_modules:
__snake_case = file.split('.')[0]
try:
__snake_case = getattr(lowercase_ , lowercase_)
__snake_case = doctest.DocTestSuite(lowercase_)
__snake_case = unittest.TextTestRunner().run(lowercase_)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(F"{module_identifier} is not a module.")
else:
__snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _a ( self) -> str:
__snake_case = Path('src/transformers')
__snake_case = 'modeling'
__snake_case = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = Path('src/transformers')
__snake_case = 'tokenization'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> List[str]:
__snake_case = Path('src/transformers')
__snake_case = 'configuration'
self.analyze_directory(lowercase_ , identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('src/transformers')
__snake_case = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase_ , n_identifier=lowercase_)
def _a ( self) -> Dict:
__snake_case = Path('docs/source')
__snake_case = ['favicon.ico']
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
| 676 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=False , lowercase_=False , lowercase_=2 , lowercase_=9_9 , lowercase_=0 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_2 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_="last" , lowercase_=None , lowercase_=None , ) -> Union[str, Any]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_lengths
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = gelu_activation
__snake_case = sinusoidal_embeddings
__snake_case = causal
__snake_case = asm
__snake_case = n_langs
__snake_case = vocab_size
__snake_case = n_special
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = summary_type
__snake_case = use_proj
__snake_case = scope
def _a ( self) -> Dict:
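        # Build random input ids, masks, optional lengths/langs and labels, plus a config.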
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_input_lengths:
__snake_case = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , 2).float()
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self) -> str:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = FlaubertModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , lengths=lowercase_ , langs=lowercase_)
__snake_case = model(lowercase_ , langs=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = FlaubertWithLMHeadModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Dict:
__snake_case = FlaubertForQuestionAnsweringSimple(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_)
__snake_case = model(lowercase_ , start_positions=lowercase_ , end_positions=lowercase_)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
__snake_case = FlaubertForQuestionAnswering(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_)
__snake_case = model(
lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , cls_index=lowercase_ , is_impossible=lowercase_ , p_mask=lowercase_ , )
__snake_case = model(
lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , cls_index=lowercase_ , is_impossible=lowercase_ , )
((__snake_case ) , ) = result_with_labels.to_tuple()
__snake_case = model(lowercase_ , start_positions=lowercase_ , end_positions=lowercase_)
((__snake_case ) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[Any]:
__snake_case = FlaubertForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_)
__snake_case = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
__snake_case = self.num_labels
__snake_case = FlaubertForTokenClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[Any]:
__snake_case = self.num_choices
__snake_case = FlaubertForMultipleChoice(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__snake_case = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__snake_case = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _a ( self) -> Optional[int]:
__snake_case = self.prepare_config_and_inputs()
        (
            __snake_case ,
            __snake_case ,
            __snake_case ,
            __snake_case ,
            __snake_case ,
            __snake_case ,
            __snake_case ,
            __snake_case ,
            __snake_case ,
        ) = config_and_inputs
__snake_case = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Dict:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self , lowercase_ , lowercase_ , lowercase_=False) -> str:
__snake_case = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
return inputs_dict
def _a ( self) -> Optional[int]:
__snake_case = FlaubertModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , emb_dim=3_7)
def _a ( self) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase_)
def _a ( self) -> Any:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase_)
def _a ( self) -> Any:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase_)
def _a ( self) -> Tuple:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase_)
def _a ( self) -> List[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase_)
def _a ( self) -> Union[str, Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase_)
@slow
def _a ( self) -> Optional[int]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = FlaubertModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
@slow
@require_torch_gpu
def _a ( self) -> Optional[int]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__snake_case = True
__snake_case = model_class(config=lowercase_)
__snake_case = self._prepare_for_class(lowercase_ , lowercase_)
__snake_case = torch.jit.trace(
lowercase_ , (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu')))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase_ , os.path.join(lowercase_ , 'traced_model.pt'))
__snake_case = torch.jit.load(os.path.join(lowercase_ , 'traced_model.pt') , map_location=lowercase_)
loaded(inputs_dict['input_ids'].to(lowercase_) , inputs_dict['attention_mask'].to(lowercase_))
@require_torch
class __lowercase ( unittest.TestCase ):
@slow
def _a ( self) -> List[str]:
__snake_case = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased')
__snake_case = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
with torch.no_grad():
__snake_case = model(lowercase_)[0]
__snake_case = torch.Size((1, 1_1, 7_6_8))
self.assertEqual(output.shape , lowercase_)
__snake_case = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4))
| 704 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """
    Count the ordered ways (permutations count separately) to reach `target`
    by summing elements of `array`, each usable any number of times.
    Plain recursion: exponential time, no extra memory.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """
    Same count, memoized top-down: dp_array[t] caches the number of ways to
    reach sum t, with -1 marking "not computed yet".
    """

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """
    Bottom-up variant: dp_array[i] is the number of ordered ways to reach sum i,
    built up from dp_array[0] = 1 in O(target * n) time.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
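    # expected output: 9 (the number of ordered ways to write 5 using 1, 2 and 5)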
print(combination_sum_iv(n, array, target))
| 676 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """List the (timm key, HF key) pairs needed to rename the checkpoint's parameters."""
__snake_case = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
__snake_case = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate query/key/value weights."""
for i in range(config.num_hidden_layers ):
if base_model:
__snake_case = ''
else:
__snake_case = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__snake_case = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
__snake_case = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__snake_case = in_proj_weight[
: config.hidden_size, :
]
__snake_case = in_proj_bias[: config.hidden_size]
__snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__snake_case = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__snake_case = in_proj_weight[
-config.hidden_size :, :
]
__snake_case = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]`."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats image used to verify the converted model."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm checkpoint's weights into the HF DeiT structure."""
__snake_case = DeiTConfig()
# all deit models have fine-tuned heads
__snake_case = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
__snake_case = 1000
__snake_case = 'huggingface/label-files'
__snake_case = 'imagenet-1k-id2label.json'
__snake_case = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
__snake_case = {int(snake_case__ ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
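    # e.g. "vit_deit_base_distilled_patch16_224" -> patch_size=16, image_size=224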
__snake_case = int(deit_name[-6:-4] )
__snake_case = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
__snake_case = 192
__snake_case = 768
__snake_case = 12
__snake_case = 3
elif deit_name[9:].startswith('small' ):
__snake_case = 384
__snake_case = 1536
__snake_case = 12
__snake_case = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
__snake_case = 1024
__snake_case = 4096
__snake_case = 24
__snake_case = 16
# load original model from timm
__snake_case = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__snake_case = timm_model.state_dict()
__snake_case = create_rename_keys(snake_case__ , snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_q_k_v(snake_case__ , snake_case__ , snake_case__ )
# load HuggingFace model
__snake_case = DeiTForImageClassificationWithTeacher(snake_case__ ).eval()
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by DeiTImageProcessor
__snake_case = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
__snake_case = DeiTImageProcessor(size=snake_case__ , crop_size=config.image_size )
__snake_case = image_processor(images=prepare_img() , return_tensors='pt' )
__snake_case = encoding['pixel_values']
__snake_case = model(snake_case__ )
__snake_case = timm_model(snake_case__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case__ , outputs.logits , atol=1e-3 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 705 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> List[str]:
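        # 30 rows "my_name-train_0" ... "my_name-train_29"; the search tests below
        # all expect the last row as the nearest hit.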
        __snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(3_0).tolist()]})
return dset
def _a ( self) -> Optional[int]:
import faiss
__snake_case = self._create_dummy_dataset()
__snake_case = dset.map(
            lambda ex , i: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_)
__snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT)
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
dset.drop_index('vecs')
def _a ( self) -> str:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> int:
import faiss
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name)
dset.load_faiss_index('vecs2' , tmp_file.name)
os.unlink(tmp_file.name)
__snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa))
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
def _a ( self) -> List[Any]:
__snake_case = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs')
dset.drop_index('vecs')
self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa)))
def _a ( self) -> Any:
from elasticsearch import Elasticsearch
__snake_case = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 3_0)
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}}
__snake_case = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowercase_)
__snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29')
self.assertEqual(examples['filename'][0] , 'my_name-train_29')
@require_faiss
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[int]:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsNotNone(index.faiss_index)
self.assertEqual(index.faiss_index.ntotal , 5)
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa))
self.assertEqual(index.faiss_index.ntotal , 1_0)
# single query
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1))
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
# batched queries
__snake_case = np.eye(5 , dtype=np.floataa)[::-1]
__snake_case , __snake_case = index.search_batch(lowercase_)
self.assertRaises(lowercase_ , index.search_batch , queries[0])
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([4, 3, 2, 1, 0] , lowercase_)
def _a ( self) -> str:
import faiss
__snake_case = FaissIndex(string_factory='Flat')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
__snake_case = FaissIndex(string_factory='LSH')
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexLSH)
with self.assertRaises(lowercase_):
__snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5))
def _a ( self) -> Optional[int]:
import faiss
__snake_case = faiss.IndexFlat(5)
__snake_case = FaissIndex(custom_index=lowercase_)
index.add_vectors(np.eye(5 , dtype=np.floataa))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat)
def _a ( self) -> Tuple:
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5 , dtype=np.floataa))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file:
index.save(tmp_file.name)
__snake_case = FaissIndex.load(tmp_file.name)
os.unlink(tmp_file.name)
__snake_case = np.zeros(5 , dtype=np.floataa)
__snake_case = 1
__snake_case , __snake_case = index.search(lowercase_)
self.assertGreater(scores[0] , 0)
self.assertEqual(indices[0] , 1)
@require_faiss
def test_serialization_fs(mockfs):
    """Round-trip a FAISS index through the mocked fsspec filesystem fixture."""
import faiss
__snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__snake_case = 'index.faiss'
__snake_case = f"mock://{index_name}"
index.save(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options )
__snake_case = np.zeros(5 , dtype=np.floataa )
__snake_case = 1
__snake_case , __snake_case = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __lowercase ( lowerCamelCase__ ):
def _a ( self) -> Optional[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
__snake_case = Elasticsearch()
__snake_case = {'acknowledged': True}
__snake_case = ElasticSearchIndex(es_client=lowercase_)
mocked_bulk.return_value([(True, None)] * 3)
index.add_documents(['foo', 'bar', 'foobar'])
# single query
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# single query with timeout
__snake_case = 'foo'
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0)
self.assertEqual(scores[0] , 1)
self.assertEqual(indices[0] , 0)
# batched queries
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
# batched queries with timeout
__snake_case = ['foo', 'bar', 'foobar']
__snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0)
__snake_case = [scores[0] for scores in total_scores]
__snake_case = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowercase_) , 0)
self.assertListEqual([1, 1, 1] , lowercase_)
| 676 | 0 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate the inductive reactance, frequency or inductance of an electrical
    circuit from the other two values, using X_L = 2 * pi * f * L.
    Exactly one of the three arguments must be 0; that quantity is solved for
    and returned in a single-entry dict.

    >>> round(ind_reactance(35e-3, 1e3, 0)["reactance"], 4)
    219.9115
    >>> ind_reactance(-35e-3, 1e3, 0)
    Traceback (most recent call last):
        ...
    ValueError: Inductance cannot be negative
    """
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER over the decoded predictions and print/log the results."""
__snake_case = args.log_outputs
__snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
__snake_case = load_metric('wer' )
__snake_case = load_metric('cer' )
# compute metrics
__snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] )
__snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
__snake_case = f"WER: {wer_result}\nCER: {cer_result}"
print(snake_case__ )
with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f:
f.write(snake_case__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__snake_case = f"log_{dataset_id}_predictions.txt"
__snake_case = f"log_{dataset_id}_targets.txt"
with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t:
# mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f"{i}" + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lower-case the target transcription and strip punctuation and extra whitespace."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))
    return text
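# e.g. normalize_text("Hello,\nWORLD!") -> "hello world"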
def main(args):
    """Run the ASR pipeline over the requested dataset split and report WER/CER."""
# load dataset
__snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
__snake_case = AutoFeatureExtractor.from_pretrained(args.model_id )
__snake_case = feature_extractor.sampling_rate
# resample audio
__snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) )
# load eval pipeline
if args.device is None:
__snake_case = 0 if torch.cuda.is_available() else -1
__snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s)
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch
# run inference on all examples
__snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case__ , snake_case__ )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
UpperCAmelCase__ : str = parser.parse_args()
main(args)
| 676 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCAmelCase : str = logging.get_logger(__name__)
class __lowercase :
def __init__( self , lowercase_ = None , lowercase_ = None , lowercase_=None , lowercase_=None) -> Union[str, Any]:
if not conversation_id:
__snake_case = uuid.uuida()
if past_user_inputs is None:
__snake_case = []
if generated_responses is None:
__snake_case = []
__snake_case = conversation_id
__snake_case = past_user_inputs
__snake_case = generated_responses
__snake_case = text
def __eq__( self , lowercase_) -> Optional[int]:
if not isinstance(lowercase_ , lowercase_):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _a ( self , lowercase_ , lowercase_ = False) -> Optional[Any]:
if self.new_user_input:
if overwrite:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
F"with: \"{text}\".")
__snake_case = text
else:
logger.warning(
F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input")
else:
__snake_case = text
def _a ( self) -> int:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
__snake_case = None
def _a ( self , lowercase_) -> Union[str, Any]:
self.generated_responses.append(lowercase_)
def _a ( self) -> List[Any]:
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self) -> List[Any]:
__snake_case = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
__snake_case = 'user' if is_user else 'bot'
output += F"{name} >> {text} \n"
return output
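# A minimal usage sketch (the checkpoint name is just an example; any conversational
# model works):
#
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("Going to the movies tonight - any suggestions?")
#   conversation = chatbot(conversation)
#   conversation.generated_responses[-1]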
@add_end_docstrings(
lowerCamelCase__ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class __lowercase ( lowerCamelCase__ ):
def __init__( self , *lowercase_ , **lowercase_) -> Optional[int]:
super().__init__(*lowercase_ , **lowercase_)
if self.tokenizer.pad_token_id is None:
__snake_case = self.tokenizer.eos_token
def _a ( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_) -> Any:
__snake_case = {}
__snake_case = {}
__snake_case = {}
if min_length_for_response is not None:
__snake_case = min_length_for_response
if minimum_tokens is not None:
__snake_case = minimum_tokens
if "max_length" in generate_kwargs:
__snake_case = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__snake_case = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowercase_)
return preprocess_params, forward_params, postprocess_params
def __call__( self , lowercase_ , lowercase_=0 , **lowercase_) -> Union[str, Any]:
__snake_case = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_)
if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1:
return outputs[0]
return outputs
def _a ( self , lowercase_ , lowercase_=3_2) -> Dict[str, Any]:
if not isinstance(lowercase_ , lowercase_):
            raise ValueError('ConversationalPipeline expects a Conversation as input')
if conversation.new_user_input is None:
raise ValueError(
F"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
'Add user inputs with the conversation\'s `add_user_input` method')
if hasattr(self.tokenizer , '_build_conversation_input_ids'):
__snake_case = self.tokenizer._build_conversation_input_ids(lowercase_)
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__snake_case = self._legacy_parse_and_tokenize(lowercase_)
if self.framework == "pt":
__snake_case = torch.LongTensor([input_ids])
elif self.framework == "tf":
__snake_case = tf.constant([input_ids])
return {"input_ids": input_ids, "conversation": conversation}
def _a ( self , lowercase_ , lowercase_=1_0 , **lowercase_) -> int:
__snake_case = generate_kwargs.get('max_length' , self.model.config.max_length)
__snake_case = model_inputs['input_ids'].shape[1]
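        # keep at least `minimum_tokens` of headroom for generation: if the prompt is
        # longer than max_length - minimum_tokens, the oldest prompt tokens are trimmed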
if max_length - minimum_tokens < n:
logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})")
__snake_case = max_length - minimum_tokens
__snake_case = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
__snake_case = model_inputs['attention_mask'][:, -trim:]
__snake_case = model_inputs.pop('conversation')
__snake_case = max_length
__snake_case = self.model.generate(**lowercase_ , **lowercase_)
if self.model.config.is_encoder_decoder:
__snake_case = 1
else:
__snake_case = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _a ( self , lowercase_ , lowercase_=True) -> Dict:
__snake_case = model_outputs['output_ids']
__snake_case = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
__snake_case = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(lowercase_)
return conversation
def _a ( self , lowercase_) -> Dict:
__snake_case = self.tokenizer.eos_token_id
__snake_case = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id])
else:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_))
if len(lowercase_) > self.tokenizer.model_max_length:
__snake_case = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 707 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*args):
    """solves the multi-process interleaved print problem"""
    with open(__file__ , 'r') as fh:
        fcntl.flock(fh , fcntl.LOCK_EX)
        try:
            print(*args)
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN)
UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
UpperCAmelCase__ : Any = torch.device("cuda", local_rank)
UpperCAmelCase__ : Union[str, Any] = socket.gethostname()
UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
UpperCAmelCase__ : Optional[int] = dist.get_rank()
UpperCAmelCase__ : List[str] = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 676 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __lowercase :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=1_0 , lowercase_=3 , lowercase_=2 , lowercase_=2 , lowercase_=True , lowercase_=True , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1_0 , lowercase_=0.02 , lowercase_="divided_space_time" , lowercase_=None , ) -> List[Any]:
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = patch_size
__snake_case = num_frames
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = attention_type
__snake_case = initializer_range
__snake_case = scope
__snake_case = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__snake_case = (image_size // patch_size) ** 2
__snake_case = (num_frames) * self.num_patches_per_frame + 1
def _a ( self) -> Any:
__snake_case = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels)
__snake_case = self.get_config()
return config, pixel_values, labels
def _a ( self) -> Dict:
__snake_case = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__snake_case = self.num_labels
return config
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Optional[int]:
__snake_case = TimesformerModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_) -> List[str]:
__snake_case = TimesformerForVideoClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_)
# verify the logits shape
__snake_case = torch.Size((self.batch_size, self.num_labels))
self.parent.assertEqual(result.logits.shape , lowercase_)
def _a ( self) -> List[Any]:
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__UpperCAmelCase = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> str:
__snake_case = TimesformerModelTester(self)
__snake_case = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=3_7)
def _a ( self , lowercase_ , lowercase_ , lowercase_=False) -> Any:
__snake_case = copy.deepcopy(lowercase_)
if return_labels:
if model_class in get_values(lowercase_):
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_)
return inputs_dict
def _a ( self) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds')
def _a ( self) -> Dict:
pass
def _a ( self) -> Union[str, Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(lowercase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear))
def _a ( self) -> List[str]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(lowercase_)
__snake_case = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_)
def _a ( self) -> Tuple:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> int:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase_)
@slow
def _a ( self) -> Union[str, Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = TimesformerModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def _a ( self) -> Optional[Any]:
if not self.has_attentions:
pass
else:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
for model_class in self.all_model_classes:
__snake_case = self.model_tester.seq_length
__snake_case = self.model_tester.num_frames
__snake_case = True
__snake_case = False
__snake_case = True
__snake_case = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_))
__snake_case = outputs.attentions
self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case = True
__snake_case = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_))
__snake_case = outputs.attentions
self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__snake_case = len(lowercase_)
# Check attention is always last and order is fine
__snake_case = True
__snake_case = True
__snake_case = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_))
self.assertEqual(out_len + 1 , len(lowercase_))
__snake_case = outputs.attentions
self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers)
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _a ( self) -> List[str]:
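        # Hidden states should be returned for the embeddings plus every layer,
        # each with shape (..., seq_length, hidden_size).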
def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_):
__snake_case = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_))
__snake_case = outputs.hidden_states
__snake_case = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase_) , lowercase_)
__snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
def prepare_video():
    """Load the 'eating spaghetti' test clip from the Hub as a list of frames."""
__snake_case = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
__snake_case = np.load(snake_case__ )
return list(snake_case__ )
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def _a ( self) -> Optional[int]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def _a ( self) -> Any:
__snake_case = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400').to(
lowercase_)
__snake_case = self.default_image_processor
__snake_case = prepare_video()
__snake_case = image_processor(video[:8] , return_tensors='pt').to(lowercase_)
# forward pass
with torch.no_grad():
__snake_case = model(**lowercase_)
# verify the logits
__snake_case = torch.Size((1, 4_0_0))
self.assertEqual(outputs.logits.shape , lowercase_)
__snake_case = torch.tensor([-0.3016, -0.7713, -0.4205]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
| 708 |
from datetime import datetime
import requests
def A ( snake_case__ : str ) -> bytes:
'''simple docstring'''
__snake_case = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
__snake_case = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(snake_case__ ).content
if __name__ == "__main__":
UpperCAmelCase__ : Dict = input("Enter Video/IGTV url: ").strip()
UpperCAmelCase__ : Optional[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 676 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = StableUnCLIPPipeline
__UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
__UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__UpperCAmelCase = False
def _a ( self) -> Tuple:
__snake_case = 3_2
__snake_case = embedder_hidden_size
# prior components
torch.manual_seed(0)
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
torch.manual_seed(0)
__snake_case = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ))
torch.manual_seed(0)
__snake_case = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=lowercase_ , num_layers=1 , )
torch.manual_seed(0)
__snake_case = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0)
__snake_case = StableUnCLIPImageNormalizer(embedding_dim=lowercase_)
__snake_case = DDPMScheduler(beta_schedule='squaredcos_cap_v2')
torch.manual_seed(0)
__snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
torch.manual_seed(0)
__snake_case = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ))
torch.manual_seed(0)
__snake_case = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , )
torch.manual_seed(0)
__snake_case = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=lowercase_ , steps_offset=1 , )
torch.manual_seed(0)
__snake_case = AutoencoderKL()
__snake_case = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def _a ( self , lowercase_ , lowercase_=0) -> Dict:
if str(lowercase_).startswith('mps'):
__snake_case = torch.manual_seed(lowercase_)
else:
__snake_case = torch.Generator(device=lowercase_).manual_seed(lowercase_)
__snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _a ( self) -> str:
__snake_case = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_)
def _a ( self) -> Tuple:
__snake_case = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=lowercase_)
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def _a ( self) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self) -> Tuple:
__snake_case = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy')
__snake_case = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case = torch.Generator(device='cpu').manual_seed(0)
        __snake_case = pipe('anime turtle' , generator=lowercase_ , output_type='np')
__snake_case = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_)
def _a ( self) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa)
__snake_case = pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
__snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 709 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __lowercase :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]:
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = scope
def _a ( self) -> Union[str, Any]:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__snake_case = None
if self.use_input_mask:
__snake_case = random_attention_mask([self.batch_size, self.seq_length])
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__snake_case = ids_tensor([self.batch_size] , self.num_choices)
__snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self) -> Tuple:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]:
__snake_case = OpenLlamaModel(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_)
__snake_case = model(lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]:
__snake_case = True
__snake_case = OpenLlamaModel(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
__snake_case = model(lowercase_ , attention_mask=lowercase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str:
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]:
__snake_case = True
__snake_case = True
__snake_case = OpenLlamaForCausalLM(config=lowercase_)
model.to(lowercase_)
model.eval()
# first forward pass
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
__snake_case = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
__snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size)
__snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append the new tokens to input_ids and the attention mask
__snake_case = torch.cat([input_ids, next_tokens] , dim=-1)
__snake_case = torch.cat([input_mask, next_mask] , dim=-1)
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
__snake_case = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
__snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item()
__snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3))
def _a ( self) -> Optional[Any]:
__snake_case = self.prepare_config_and_inputs()
        (
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
            __snake_case,
        ) = config_and_inputs
__snake_case = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def _a ( self) -> Tuple:
__snake_case = OpenLlamaModelTester(self)
__snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7)
def _a ( self) -> int:
self.config_tester.run_common_tests()
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> Optional[Any]:
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*lowercase_)
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> str:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'single_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _a ( self) -> int:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = 3
__snake_case = 'multi_label_classification'
__snake_case = input_dict['input_ids']
__snake_case = input_ids.ne(1).to(lowercase_)
__snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__snake_case = OpenLlamaForSequenceClassification(lowercase_)
model.to(lowercase_)
model.eval()
__snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def _a ( self) -> List[Any]:
pass
@parameterized.expand([('linear',), ('dynamic',)])
def _a ( self , lowercase_) -> Optional[Any]:
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = ids_tensor([1, 1_0] , config.vocab_size)
__snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = OpenLlamaModel(lowercase_)
original_model.to(lowercase_)
original_model.eval()
__snake_case = original_model(lowercase_).last_hidden_state
__snake_case = original_model(lowercase_).last_hidden_state
set_seed(4_2) # Fixed seed at init time so the two models get the same random weights
__snake_case = {'type': scaling_type, 'factor': 10.0}
__snake_case = OpenLlamaModel(lowercase_)
scaled_model.to(lowercase_)
scaled_model.eval()
__snake_case = scaled_model(lowercase_).last_hidden_state
__snake_case = scaled_model(lowercase_).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
| 676 | 0 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:  # descriptive name; the original identifier was obfuscated
    '''Return True if some subset of `arr` sums to exactly `required_sum`.'''
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # if the sum is not zero and the set is empty, the sum cannot be formed
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
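# Illustrative usage (standard subset-sum examples, not from the original file):
# is_sum_subset([3, 34, 4, 12, 5, 2], 9)  -> True   (4 + 5 == 9)
# is_sum_subset([3, 34, 4, 12, 5, 2], 30) -> False  (no subset reaches exactly 30)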
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
def is_automorphic_number(number: int) -> bool:  # descriptive name; the original identifier was obfuscated
    '''Return True if `number` is automorphic, i.e. number**2 ends in the digits of number.'''
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
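# Illustrative checks (well-known automorphic numbers, not from the original file):
# is_automorphic_number(5)  -> True   (5**2 = 25 ends in 5)
# is_automorphic_number(76) -> True   (76**2 = 5776 ends in 76)
# is_automorphic_number(7)  -> False  (7**2 = 49 does not end in 7)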
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCAmelCase__ : int = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 711 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    '''Logistic sigmoid, applied element-wise.'''
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    '''x * sigmoid(x), i.e. the SiLU/"swish" activation; the name `swish` is a descriptive choice here.'''
    return vector * sigmoid(vector)
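# Illustrative values (computed by hand, not from the original file):
# sigmoid(np.array([0.0])) -> array([0.5]);  swish(np.array([0.0])) -> array([0.])
# For large positive x, swish(x) approaches x; for large negative x it approaches 0.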
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:  # descriptive name; the original identifier was obfuscated
    '''Return True if `string` can be segmented into a sequence of words from `words`.'''
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError('the string should be not empty string')
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError('the words should be a list of non-empty strings')
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = 'WORD_KEEPER'
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string)
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False
    return is_breakable(0)
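# Illustrative usage (classic word-break examples, not from the original file):
# word_break('applepenapple', ['apple', 'pen'])                  -> True
# word_break('catsandog', ['cats', 'dog', 'sand', 'and', 'cat']) -> False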
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
def lucas_lehmer_test(p: int) -> bool:
    '''Return True if the Mersenne number 2**p - 1 is prime (p itself must be prime for the test to apply).'''
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
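# Worked examples (standard Lucas-Lehmer results, matching the calls below):
# p = 7:  2**7 - 1 = 127 is prime        -> lucas_lehmer_test(7)  is True
# p = 11: 2**11 - 1 = 2047 = 23 * 89     -> lucas_lehmer_test(11) is False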
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 676 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
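# With the default "cosine" transform, alpha_bar(t) = cos(((t + 0.008) / 1.008) * pi / 2) ** 2
# and each beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta); e.g.
# betas_for_alpha_bar(1000) returns a 1000-element tensor of betas rising toward max_beta.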
class __lowercase ( lowerCamelCase__ , lowerCamelCase__ ):
__UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase = 2
@register_to_config
def __init__( self , lowercase_ = 1_0_0_0 , lowercase_ = 0.0_0085 , lowercase_ = 0.012 , lowercase_ = "linear" , lowercase_ = None , lowercase_ = "epsilon" , lowercase_ = False , lowercase_ = False , lowercase_ = 1.0 , lowercase_ = "linspace" , lowercase_ = 0 , ) -> List[str]:
if trained_betas is not None:
__snake_case = torch.tensor(lowercase_ , dtype=torch.floataa)
elif beta_schedule == "linear":
__snake_case = torch.linspace(lowercase_ , lowercase_ , lowercase_ , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__snake_case = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowercase_ , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__snake_case = betas_for_alpha_bar(lowercase_ , alpha_transform_type='cosine')
elif beta_schedule == "exp":
__snake_case = betas_for_alpha_bar(lowercase_ , alpha_transform_type='exp')
else:
raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}")
__snake_case = 1.0 - self.betas
__snake_case = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(lowercase_ , lowercase_ , lowercase_)
__snake_case = use_karras_sigmas
def _a ( self , lowercase_ , lowercase_=None) -> Union[str, Any]:
if schedule_timesteps is None:
__snake_case = self.timesteps
__snake_case = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
__snake_case = 1 if len(lowercase_) > 1 else 0
else:
__snake_case = timestep.cpu().item() if torch.is_tensor(lowercase_) else timestep
__snake_case = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _a ( self) -> List[str]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _a ( self , lowercase_ , lowercase_ , ) -> torch.FloatTensor:
__snake_case = self.index_for_timestep(lowercase_)
__snake_case = self.sigmas[step_index]
__snake_case = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , ) -> str:
__snake_case = num_inference_steps
__snake_case = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__snake_case = np.linspace(0 , num_train_timesteps - 1 , lowercase_ , dtype=lowercase_)[::-1].copy()
elif self.config.timestep_spacing == "leading":
__snake_case = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(0 , lowercase_) * step_ratio).round()[::-1].copy().astype(lowercase_)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__snake_case = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(lowercase_ , 0 , -step_ratio)).round().copy().astype(lowercase_)
timesteps -= 1
else:
raise ValueError(
F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")
__snake_case = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
__snake_case = np.log(lowercase_)
__snake_case = np.interp(lowercase_ , np.arange(0 , len(lowercase_)) , lowercase_)
if self.config.use_karras_sigmas:
__snake_case = self._convert_to_karras(in_sigmas=lowercase_ , num_inference_steps=self.num_inference_steps)
__snake_case = np.array([self._sigma_to_t(lowercase_ , lowercase_) for sigma in sigmas])
__snake_case = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
__snake_case = torch.from_numpy(lowercase_).to(device=lowercase_)
__snake_case = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
__snake_case = torch.from_numpy(lowercase_)
__snake_case = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
if str(lowercase_).startswith('mps'):
# mps does not support float64
__snake_case = timesteps.to(lowercase_ , dtype=torch.floataa)
else:
__snake_case = timesteps.to(device=lowercase_)
# empty dt and derivative
__snake_case = None
__snake_case = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__snake_case = defaultdict(lowercase_)
def _a ( self , lowercase_ , lowercase_) -> List[str]:
# get log sigma
__snake_case = np.log(lowercase_)
# get distribution
__snake_case = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__snake_case = np.cumsum((dists >= 0) , axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
__snake_case = low_idx + 1
__snake_case = log_sigmas[low_idx]
__snake_case = log_sigmas[high_idx]
# interpolate sigmas
__snake_case = (low - log_sigma) / (low - high)
__snake_case = np.clip(lowercase_ , 0 , 1)
# transform interpolation to time range
__snake_case = (1 - w) * low_idx + w * high_idx
__snake_case = t.reshape(sigma.shape)
return t
def _a ( self , lowercase_ , lowercase_) -> torch.FloatTensor:
__snake_case = in_sigmas[-1].item()
__snake_case = in_sigmas[0].item()
__snake_case = 7.0 # 7.0 is the value used in the paper
__snake_case = np.linspace(0 , 1 , lowercase_)
__snake_case = sigma_min ** (1 / rho)
__snake_case = sigma_max ** (1 / rho)
__snake_case = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
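        # i.e. sigma_i = (sigma_max**(1/rho) + ramp_i * (sigma_min**(1/rho) - sigma_max**(1/rho))) ** rho,
        # the noise schedule from Karras et al. (2022); rho = 7 clusters the steps near sigma_min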
return sigmas
@property
def _a ( self) -> Tuple:
return self.dt is None
def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ) -> Union[SchedulerOutput, Tuple]:
__snake_case = self.index_for_timestep(lowercase_)
# advance index counter by 1
__snake_case = timestep.cpu().item() if torch.is_tensor(lowercase_) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__snake_case = self.sigmas[step_index]
__snake_case = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__snake_case = self.sigmas[step_index - 1]
__snake_case = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__snake_case = 0
__snake_case = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__snake_case = sigma_hat if self.state_in_first_order else sigma_next
__snake_case = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__snake_case = sigma_hat if self.state_in_first_order else sigma_next
__snake_case = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__snake_case = model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")
if self.config.clip_sample:
__snake_case = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range)
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__snake_case = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__snake_case = sigma_next - sigma_hat
# store for 2nd order step
__snake_case = derivative
__snake_case = dt
__snake_case = sample
else:
# 2. 2nd order / Heun's method
__snake_case = (sample - pred_original_sample) / sigma_next
__snake_case = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__snake_case = self.dt
__snake_case = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowercase_)
def _a ( self , lowercase_ , lowercase_ , lowercase_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__snake_case = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(lowercase_):
# mps does not support float64
__snake_case = self.timesteps.to(original_samples.device , dtype=torch.floataa)
__snake_case = timesteps.to(original_samples.device , dtype=torch.floataa)
else:
__snake_case = self.timesteps.to(original_samples.device)
__snake_case = timesteps.to(original_samples.device)
__snake_case = [self.index_for_timestep(lowercase_ , lowercase_) for t in timesteps]
__snake_case = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
__snake_case = sigma.unsqueeze(-1)
__snake_case = original_samples + noise * sigma
return noisy_samples
def __len__( self) -> Tuple:
return self.config.num_train_timesteps
| 713 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : Optional[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"]
UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
# NOTE: the fixture and test names below are reconstructions; the bodies follow the original logic
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() )


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() )


@pytest.mark.parametrize(
    'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match='https://huggingface.co/docs/evaluate' ):
        func(*args )
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__)
@dataclass
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self , **lowercase_) -> str:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__snake_case = deprecated_arg[3:]
__snake_case = not kwargs.pop(lowercase_)
logger.warning(
F"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
F" {positive_arg}={kwargs[positive_arg]}")
__snake_case = kwargs.pop('tpu_name' , self.tpu_name)
__snake_case = kwargs.pop('device_idx' , self.device_idx)
__snake_case = kwargs.pop('eager_mode' , self.eager_mode)
__snake_case = kwargs.pop('use_xla' , self.use_xla)
super().__init__(**lowercase_)
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Name of TPU'''} , )
__UpperCAmelCase = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
__UpperCAmelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Benchmark models in eager model.'''} )
__UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def _a ( self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['tf'])
__snake_case = None
if self.tpu:
try:
if self.tpu_name:
__snake_case = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
else:
__snake_case = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__snake_case = None
return tpu
@cached_property
def _a ( self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['tf'])
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu)
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
__snake_case = tf.distribute.TPUStrategy(self._setup_tpu)
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU')
__snake_case = tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}")
else:
tf.config.set_visible_devices([] , 'GPU') # disable GPU
__snake_case = tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}")
return strategy
@property
def _a ( self) -> bool:
requires_backends(self , ['tf'])
return self._setup_tpu is not None
@property
def _a ( self) -> "tf.distribute.Strategy":
requires_backends(self , ['tf'])
return self._setup_strategy
@property
def _a ( self) -> Tuple:
requires_backends(self , ['tf'])
return tf.config.list_physical_devices('GPU')
@property
def _a ( self) -> int:
requires_backends(self , ['tf'])
if self.cuda:
return len(self.gpu_list)
return 0
@property
def _a ( self) -> bool:
return self.n_gpu > 0
| 715 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
    '''simple docstring'''
    # the config attribute names below are reconstructed from Swin2SRConfig's known fields;
    # the values themselves come from the original file
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.resi_connection = ''
    return config
def rename_key(name, config):
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
__snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__snake_case = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__snake_case = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__snake_case = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__snake_case = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__snake_case = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__snake_case = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__snake_case = 'layernorm.weight'
if name == "norm.bias":
__snake_case = 'layernorm.bias'
if "conv_first" in name:
__snake_case = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__snake_case = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__snake_case = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__snake_case = name.replace('upsample.2' , 'upsample.convolution_1' )
__snake_case = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__snake_case = 'swin2sr.' + name
return name
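# Example mapping (illustrative, derived from the replace rules above): rename_key turns an
# original checkpoint key like 'layers.0.residual_group.blocks.1.attn.proj.weight' into
# 'swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight'.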
def convert_state_dict(orig_state_dict, config):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(snake_case__ )
if "qkv" in key:
__snake_case = key.split('.' )
__snake_case = int(key_split[1] )
__snake_case = int(key_split[4] )
__snake_case = config.embed_dim
if "weight" in key:
__snake_case = val[:dim, :]
__snake_case = val[dim : dim * 2, :]
__snake_case = val[-dim:, :]
else:
__snake_case = val[:dim]
__snake_case = val[dim : dim * 2]
__snake_case = val[-dim:]
pass
else:
__snake_case = val
return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
'''simple docstring'''
__snake_case = get_config(snake_case__ )
__snake_case = SwinaSRForImageSuperResolution(snake_case__ )
model.eval()
__snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )
__snake_case = convert_state_dict(snake_case__ , snake_case__ )
__snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ )
if len(snake_case__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"Unexpected key {key} in state_dict" )
# verify values
__snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' )
__snake_case = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__snake_case = 126 if 'Jpeg' in checkpoint_url else 256
__snake_case = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case = transforms(snake_case__ ).unsqueeze(0 )
if config.num_channels == 1:
__snake_case = pixel_values[:, 0, :, :].unsqueeze(1 )
__snake_case = model(snake_case__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__snake_case = torch.Size([1, 3, 512, 512] )
__snake_case = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__snake_case = torch.Size([1, 3, 1024, 1024] )
__snake_case = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 )
print('Looks ok!' )
__snake_case = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__snake_case = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub(f"caidas/{model_name}" )
processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 676 | 0 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    '''Return all primes below `limit`, using an odd-only sieve of Eratosthenes.'''
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )

    return primes


def solution(ceiling: int = 1_000_000 ) -> int:
    '''Project Euler 50: the prime below `ceiling` expressible as the longest sum of consecutive primes.'''
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0

    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
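# Known result (the published Project Euler 50 answer, stated here for reference):
# solution() == 997651, the sum of 543 consecutive primes starting at 7.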
if __name__ == "__main__":
print(F"""{solution() = }""")
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase__ : int = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 676 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : Union[str, Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[str] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 717 |
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left = None
        self.right = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # The wiring below is a reconstruction: the original only shows that nine
    # nodes (1..9) are created and linked into a single tree.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('Tree is: ' )
    display(tree )
if __name__ == "__main__":
main()
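# Expected output for the sample tree above (illustrative check): node 5 has a
# left child but no right child, so the tree is not full, and the longest
# root-to-leaf path is 1 -> 3 -> 7 -> 8 -> 9:
#
#     False
#     5
#     Tree is:
#     4 2 6 5 1 8 9 7 3    (in-order, printed one value per line)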
| 676 | 0 |
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name: str) -> str:
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name: str) -> str:
    """Convert a snake-case string to camel-case."""
    names = _single_underscore_re.split(name)
    names = [_multiple_underscores_re.split(n) for n in names]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(names) if n != "")


def filename_prefix_for_name(name: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name: str, split: str) -> str:
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
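# Usage sketch (illustrative values, not from the library's test suite):
#
#     camelcase_to_snakecase("SomeDataset")
#     # -> "some_dataset"
#
#     filenames_for_dataset_split(
#         "/cache", "SomeDataset", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
#     )
#     # -> ["/cache/some_dataset-train-00000-of-00002.arrow",
#     #     "/cache/some_dataset-train-00001-of-00002.arrow"]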
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
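# Usage sketch: the `attribute_map` above lets the generic config names resolve
# to the model-specific ones (illustrative):
#
#     config = TableTransformerConfig()
#     config.hidden_size           # -> 256, aliased to config.d_model
#     config.num_attention_heads   # -> 8, aliased to config.encoder_attention_heads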
| 676 | 0 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """
    Naive recursion: counts the ordered ways to reach `target` by summing items
    from `array` (items may be reused).
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """
    Top-down recursion with memoization: the same count, but each subtarget is
    solved once and cached in `dp_array`.
    """

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """
    Bottom-up dynamic programming: dp_array[i] is the number of ordered ways to
    form the sum i.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
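# Illustrative check: with array = [1, 2, 5] and target = 5 there are 9 ordered
# combinations (1+1+1+1+1; 1+1+1+2 in 4 orders; 1+2+2 in 3 orders; 5), so all
# three variants print 9. The naive recursion is exponential in `target`, while
# the memoized and bottom-up versions run in O(target * n).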
| 719 |
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """
    Returns number + 2 if (number, number + 2) is a twin-prime pair, else -1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
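# Illustrative usage:
#
#     twin_prime(3)   # -> 5   (3 and 5 are both prime)
#     twin_prime(4)   # -> -1  (4 is not prime)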
| 676 | 0 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Creates a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
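# Illustrative check (not from the library's tests): for the default cosine
# transform, betas_for_alpha_bar(4) returns a length-4 tensor where
# beta_i = 1 - alpha_bar((i + 1) / 4) / alpha_bar(i / 4), each entry capped at
# max_beta; the final entry hits the cap because alpha_bar(1.0) is ~0.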
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
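# Usage sketch (illustrative; assumes a diffusers-style pipeline where `unet`
# and `latents` already exist -- the UNet call below is hypothetical):
#
#     scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     for t in scheduler.timesteps:
#         noise_pred = unet(latents, t).sample
#         latents = scheduler.step(noise_pred, t, latents).prev_sample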
| 720 |
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
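# One parametrized case spelled out (illustrative):
#
#     hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision="v2")
#     # -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"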
| 676 | 0 |