"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
    import torch

if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
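
# Worked example for the helper above (values derived from the code, not from the
# original file): a 480x640 (height x width) input resized toward (384, 384) with
# keep_aspect_ratio=True and multiple=32 "fits height" (scale 0.8 on both axes)
# and returns (384, 512), both multiples of 32.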
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None,
                 resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False,
                 ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
                 do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None,
                 image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False,
               ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float],
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]],
                  data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None,
                   keep_aspect_ratio: Optional[bool] = None, ensure_multiple_of: Optional[int] = None,
                   resample: Optional[PILImageResampling] = None, do_rescale: Optional[bool] = None,
                   rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None,
                   image_mean: Optional[Union[float, List[float]]] = None,
                   image_std: Optional[Union[float, List[float]]] = None,
                   return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
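
# Usage sketch (a synthetic input; assumes this module is importable from within
# transformers so the relative imports above resolve):
#
#     import numpy as np
#     processor = DPTImageProcessor(size={"height": 384, "width": 384})
#     image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#     batch = processor.preprocess(image, return_tensors="np")
#     print(batch["pixel_values"].shape)   # (1, 3, 384, 384)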
"""simple docstring"""
from collections import defaultdict
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
__UpperCAmelCase = first_str.lower().strip()
__UpperCAmelCase = second_str.lower().strip()
# Remove whitespace
__UpperCAmelCase = first_str.replace(''' ''' , '''''' )
__UpperCAmelCase = second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(snake_case_ ) != len(snake_case_ ):
return False
# Default values for count should be 0
__UpperCAmelCase = defaultdict(snake_case_ )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(snake_case_ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase : List[Any] = input('Enter the first string ').strip()
_lowercase : Tuple = input('Enter the second string ').strip()
_lowercase : str = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
                 initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3,
                 use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False,
                 use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1,
                 use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6],
                 use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256,
                 auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
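
# Usage sketch: the defaults reproduce the ViT-Base-style layout used by the
# data2vec-vision base checkpoints.
#
#     config = Data2VecVisionConfig()
#     print(config.hidden_size, config.num_hidden_layers)   # 768 12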
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            fp16=True, multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation of the CLIP embedder used in stable
    unCLIP, so image embeddings can be whitened before noising and de-whitened after."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
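
# Roundtrip sketch: `unscale` inverts `scale` up to floating-point error.
#
#     normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#     embeds = torch.randn(2, 768)
#     restored = normalizer.unscale(normalizer.scale(embeds))
#     assert torch.allclose(restored, embeds, atol=1e-5)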
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
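
# Usage sketch (the checkpoint name is illustrative):
#
#     from transformers import pipeline
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("This is a test")  # nested lists: [batch][token][hidden]
#     print(len(features[0]), len(features[0][0]))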
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( __snake_case : list[int] ):
'''simple docstring'''
lowercase = len(snake_case_ )
for i in range(snake_case_ ):
for j in range(i + 1 , snake_case_ ):
if numbers[j] < numbers[i]:
lowercase , lowercase = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
_UpperCamelCase : Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
_UpperCamelCase : Tuple = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
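
# Usage sketch (assumption: `unet` and `scheduler` come from a pretrained
# unconditional diffusion checkpoint; the names below are placeholders, not
# part of this file):
#
#     pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#     result = pipe(image=pil_image, strength=0.5, num_inference_steps=50)
#     result.images[0].save("denoised.png")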
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """
    Return True when every element of `collection` appears exactly once.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(collection)) == len(collection)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return min_val when option is True, otherwise max_val."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer average of two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Search for `to_guess` inside [lower, higher] by repeated bisection, printing each step."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be (lower < higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Collect user input and start the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())

    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
"""simple docstring"""
_lowercase : Any = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_lowercase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
_lowercase : int = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments


@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            results = benchmark.run()
            _check_summary_is_not_empty(results.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args):
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :list[float] , snake_case_ :list[float] ):
__UpperCAmelCase = sorted(numsa + numsa )
__UpperCAmelCase , __UpperCAmelCase = divmod(len(snake_case_ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : int = [float(x) for x in input('Enter the elements of first array: ').split()]
_lowercase : Tuple = [float(x) for x in input('Enter the elements of second array: ').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename fairseq keys to HF keys, splitting the fused qkv projection and
    separating the enc-dec projection weights into their own state dict."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
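
# Example invocation (the script filename is illustrative):
#
#     python convert_musicgen_transformers.py \
#         --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu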
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex with a key, a parent pointer and weighted edges."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm using a linear scan for the minimum-key vertex."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]

    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm, keeping the frontier in a binary heap."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 332 | 0 |
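# Example (sketch): exercising the helpers above on a 4-vertex graph. `prim`
# scans linearly for the minimum-key vertex while `prim_heap` uses the heapq
# queue; both yield the same MST edges as (child, parent) 1-based ids.
G = [Vertex(n) for n in range(4)]
connect(G, 1, 2, 1)
connect(G, 2, 3, 2)
connect(G, 3, 4, 1)
connect(G, 1, 4, 4)
print(prim(G, G[0]))             # [(2, 1), (3, 2), (4, 3)]
print(list(prim_heap(G, G[0])))  # same edges from the heap variant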
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
) | 152 |
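# Example (sketch): the relocated helper's canonical use. The decorator injects
# `batch_size` as the first argument and halves it after every CUDA OOM until
# the wrapped loop finishes (the accelerate test file later in this dump
# exercises exactly this pattern).
from accelerate import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    ...  # build dataloaders with `batch_size` and run training

training_loop()  # note: called without arguments; the decorator supplies batch_size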
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Dict = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "swinv2"
a__ : List[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Any , _lowercase : List[Any]=2_24 , _lowercase : int=4 , _lowercase : Optional[int]=3 , _lowercase : Optional[Any]=96 , _lowercase : Optional[int]=[2, 2, 6, 2] , _lowercase : Optional[int]=[3, 6, 12, 24] , _lowercase : str=7 , _lowercase : Union[str, Any]=4.0 , _lowercase : List[str]=True , _lowercase : List[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : List[Any]=0.1 , _lowercase : Union[str, Any]="gelu" , _lowercase : Tuple=False , _lowercase : Optional[int]=0.02 , _lowercase : List[Any]=1E-5 , _lowercase : Tuple=32 , **_lowercase : Optional[int] , ):
super().__init__(**_lowercase )
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = embed_dim
__UpperCAmelCase = depths
__UpperCAmelCase = len(_lowercase )
__UpperCAmelCase = num_heads
__UpperCAmelCase = window_size
__UpperCAmelCase = mlp_ratio
__UpperCAmelCase = qkv_bias
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = drop_path_rate
__UpperCAmelCase = hidden_act
__UpperCAmelCase = use_absolute_embeddings
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = initializer_range
__UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__UpperCAmelCase = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
__UpperCAmelCase = (0, 0, 0, 0)
| 332 | 0 |
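# Example (sketch): instantiating the config and a randomly initialised model,
# confirming the derived `hidden_size` set in the last lines of __init__ above.
from transformers import Swinv2Config, Swinv2Model

swin_config = Swinv2Config(image_size=256, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
swin_model = Swinv2Model(swin_config)
print(swin_config.hidden_size)  # 96 * 2 ** (len(depths) - 1) == 768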
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main() -> None:
    """simple docstring"""
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 91 |
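# Example (sketch): a read-only dry run of the same PyGithub traversal that
# prints what the stalebot would do instead of editing issues; requires
# GITHUB_TOKEN in the environment, like the script above.
def dry_run() -> None:
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    for issue in repo.get_issues(state='open'):
        days_idle = (dt.utcnow() - issue.updated_at).days
        labels = [label.name for label in issue.get_labels()]
        print(f'#{issue.number}: idle for {days_idle} days, labels={labels}')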
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'
def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def random_quotes():
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 332 | 0 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert) | 86 |
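# Example (sketch): round-tripping a dummy fp32 state dict through `convert`
# to confirm the tensors come back in half precision (paths are temporary).
import os
import tempfile

dummy_path = os.path.join(tempfile.mkdtemp(), "dummy.bin")
torch.save({"w": torch.randn(4, 4)}, dummy_path)
convert(dummy_path)  # save_path is None, so the file is overwritten in place
assert torch.load(dummy_path)["w"].dtype == torch.float16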
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowercase : List[str] = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray] ) -> List[int]:
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def stable_softmax(logits: tf.Tensor , axis: Optional[int] = None , name: Optional[str] = None ) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1E-9 , axis=axis , name=name )
def lowercase__ ( snake_case_ :int , snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :Union[str, Any]=1E-5 , snake_case_ :List[str]=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(snake_case_ , snake_case_ ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
__UpperCAmelCase , __UpperCAmelCase = tf.nn.moments(snake_case_ , axes=[axis] , keepdims=snake_case_ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
__UpperCAmelCase = [1] * inputs.shape.rank
__UpperCAmelCase = shape_list(snake_case_ )[axis]
__UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ )
__UpperCAmelCase = tf.reshape(snake_case_ , snake_case_ )
# Compute layer normalization using the batch_normalization
# function.
__UpperCAmelCase = tf.nn.batch_normalization(
snake_case_ , snake_case_ , snake_case_ , offset=snake_case_ , scale=snake_case_ , variance_epsilon=snake_case_ , )
return outputs
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :List[str]=0 , snake_case_ :Optional[Any]=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
__UpperCAmelCase = tf.shape(snake_case_ )
__UpperCAmelCase = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
__UpperCAmelCase = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :tf.Tensor ):
if not isinstance(snake_case_ , tf.Tensor ):
__UpperCAmelCase = tf.convert_to_tensor(snake_case_ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
__UpperCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
__UpperCAmelCase = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
__UpperCAmelCase = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def lowercase__ ( snake_case_ :tf.Tensor , snake_case_ :int , snake_case_ :str = "input_ids" ):
tf.debugging.assert_less(
snake_case_ , tf.cast(snake_case_ , dtype=tensor.dtype ) , message=(
F'''The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case_ )}) must be smaller than the embedding '''
F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def lowercase__ ( snake_case_ :List[Any] , snake_case_ :List[Any] , snake_case_ :List[str] ):
__UpperCAmelCase = 64_512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
__UpperCAmelCase = [x for x in data if len(snake_case_ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
F'''bytes: {bad_attributes}''' )
__UpperCAmelCase = np.asarray(snake_case_ )
__UpperCAmelCase = 1
__UpperCAmelCase = np.array_split(snake_case_ , snake_case_ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
__UpperCAmelCase = np.array_split(snake_case_ , snake_case_ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(snake_case_ ):
__UpperCAmelCase = chunk_data
else:
__UpperCAmelCase = data
def lowercase__ ( snake_case_ :str , snake_case_ :List[str] ):
if name in group.attrs:
__UpperCAmelCase = [n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs[name]]
else:
__UpperCAmelCase = []
__UpperCAmelCase = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(snake_case_ , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def lowercase__ ( snake_case_ :Tuple ):
def _expand_single_ad_tensor(snake_case_ :Optional[int] ):
if isinstance(snake_case_ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(snake_case_ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , snake_case_ )
| 332 | 0 |
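# Example (sketch): what the torch.flatten-style helper above computes for
# start_dim=1, end_dim=2 — dims 1..2 of a (2, 3, 4, 5) tensor collapse to 12.
x = tf.zeros((2, 3, 4, 5))
in_shape = tf.shape(x)
flattened = tf.reshape(x, tf.concat([in_shape[:1], [tf.math.reduce_prod(in_shape[1:3])], in_shape[3:]], axis=0))
print(flattened.shape)  # (2, 12, 5)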
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self , row : int , column : int , default_value : float = 0 ) -> None:
        """simple docstring"""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__(self ) -> str:
        """simple docstring"""
        s = f"""Matrix consist of {self.row} rows and {self.column} columns\n"""
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = f"""%{max_element_length}s"""
        # Make string and return
        def single_line(row_vector : list[float] ) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
    def __repr__(self ) -> str:
        """simple docstring"""
        return str(self )
    def validate_indicies(self , loc : tuple[int, int] ) -> bool:
        """simple docstring"""
        if not (isinstance(loc , (list, tuple) ) and len(loc ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self , loc : tuple[int, int] ) -> Any:
        """simple docstring"""
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__(self , loc : tuple[int, int] , value : float ) -> None:
        """simple docstring"""
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__(self , another : Matrix ) -> Matrix:
        """simple docstring"""
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self ) -> Matrix:
        """simple docstring"""
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self , another : Matrix ) -> Matrix:
        """simple docstring"""
        return self + (-another)
    def __mul__(self , another : int | float | Matrix ) -> Matrix:
        """simple docstring"""
        if isinstance(another , (int, float) ): # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ): # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"""Unsupported type given for another ({type(another )})"""
            raise TypeError(msg )
    def transpose(self ) -> Matrix:
        """simple docstring"""
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self , u : Matrix , v : Matrix ):
        """simple docstring"""
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row # u, v should be column vector
        assert u.column == v.column == 1 # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        '''simple docstring'''
        # a^(-1): start from the identity matrix
        ainv = Matrix(3, 3, 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(f"""a^(-1) is {ainv}""" )
        # u, v
        u = Matrix(3, 1, 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"""u is {u}""" )
        print(f"""v is {v}""" )
        print(f"""uv^T is {u * v.transpose()}""" )
        # Sherman Morrison
        print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v )}""" )
    def test2() -> None:
        '''simple docstring'''
        import doctest
        doctest.testmod()
        test1()
    test2()
| 65 |
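# Example (sketch): validating the Sherman-Morrison identity behind the method
# above with NumPy: (A + u v^T)^(-1) = A^(-1) - A^(-1) u v^T A^(-1) / (1 + v^T A^(-1) u).
import numpy as np

A = np.eye(3)
u_col = np.array([[1.0], [2.0], [-3.0]])
v_col = np.array([[4.0], [-2.0], [5.0]])
A_inv = np.linalg.inv(A)
denom = 1.0 + (v_col.T @ A_inv @ u_col).item()
rhs = A_inv - (A_inv @ u_col @ v_col.T @ A_inv) / denom
assert np.allclose(np.linalg.inv(A + u_col @ v_col.T), rhs)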
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def lowercase__ ( snake_case_ :Union[str, Any]=None ):
if subparsers is not None:
__UpperCAmelCase = subparsers.add_parser('''env''' )
else:
__UpperCAmelCase = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' , default=snake_case_ , help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=snake_case_ )
return parser
def lowercase__ ( snake_case_ :List[Any] ):
__UpperCAmelCase = torch.__version__
__UpperCAmelCase = torch.cuda.is_available()
__UpperCAmelCase = is_xpu_available()
__UpperCAmelCase = is_npu_available()
__UpperCAmelCase = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(snake_case_ ):
__UpperCAmelCase = load_config_from_file(args.config_file ).to_dict()
__UpperCAmelCase = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''PyTorch XPU available''': str(snake_case_ ),
'''PyTorch NPU available''': str(snake_case_ ),
'''System RAM''': F'''{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB''',
}
if pt_cuda_available:
__UpperCAmelCase = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
__UpperCAmelCase = (
'''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(snake_case_ , snake_case_ )
else F'''\t{accelerate_config}'''
)
print(snake_case_ )
__UpperCAmelCase = accelerate_config
return info
def lowercase__ ( ):
__UpperCAmelCase = env_command_parser()
__UpperCAmelCase = parser.parse_args()
env_command(snake_case_ )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 332 | 0 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
_a = JukeboxTokenizer
_a = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def __lowercase ( self : Union[str, Any] ):
import torch
lowerCAmelCase = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
lowerCAmelCase = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
lowerCAmelCase = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def __lowercase ( self : Optional[Any] ):
import torch
lowerCAmelCase = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
lowerCAmelCase = tokenizer(**self.metas )["""input_ids"""]
# fmt: off
lowerCAmelCase = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 155 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Tuple = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_lowercase : List[str] = 25_00_04
_lowercase : int = 25_00_20
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
a__ : Union[str, Any] = MBartaaTokenizer
a__ : List[str] = MBartaaTokenizerFast
a__ : Any = True
a__ : List[str] = True
def a ( self : str ):
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def a ( self : Dict ):
__UpperCAmelCase = '''<s>'''
__UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_lowercase ) , 10_54 )
def a ( self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_54 )
def a ( self : str ):
__UpperCAmelCase = MBartaaTokenizer(_lowercase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_lowercase )
__UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def a ( self : str ):
# fmt: off
__UpperCAmelCase = {'''input_ids''': [[25_00_04, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [25_00_04, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_00_04, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def a ( self : str ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase )
__UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__UpperCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase )
__UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=True
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
__UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase )
__UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=False
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
__UpperCAmelCase = tokenizer_p.save_pretrained(_lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(_lowercase )
__UpperCAmelCase = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
a__ : str = "facebook/mbart-large-50-one-to-many-mmt"
a__ : Union[str, Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
a__ : Any = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
a__ : Any = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
@classmethod
def a ( cls : Tuple ):
__UpperCAmelCase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
__UpperCAmelCase = 1
return cls
def a ( self : Union[str, Any] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 25_00_38 )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
def a ( self : Optional[Any] ):
self.assertIn(_lowercase , self.tokenizer.all_special_ids )
__UpperCAmelCase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__UpperCAmelCase = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
__UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertNotIn(self.tokenizer.eos_token , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , _lowercase )
__UpperCAmelCase = 10
__UpperCAmelCase = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0]
self.assertEqual(ids[0] , _lowercase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(_lowercase ) , _lowercase )
def a ( self : Optional[int] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_53, 25_00_01] )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowercase )
__UpperCAmelCase = MBartaaTokenizer.from_pretrained(_lowercase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase )
@require_torch
def a ( self : Dict ):
__UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowercase , return_tensors='''pt''' )
__UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors='''pt''' )
__UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=10 , return_tensors='''pt''' )
__UpperCAmelCase = targets['''input_ids''']
__UpperCAmelCase = shift_tokens_right(_lowercase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def a ( self : Dict ):
__UpperCAmelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(_lowercase ) , {
# en_XX, A, test, EOS
'''input_ids''': [[25_00_04, 62, 30_34, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , )
| 332 | 0 |
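# Example (sketch): the translation-tokenizer pattern these tests exercise, on
# the public checkpoint (downloads the vocab on first use).
from transformers import MBart50TokenizerFast

mbart_tok = MBart50TokenizerFast.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
enc = mbart_tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
print(enc.input_ids[0, 0].item())  # 250004: the en_XX code is prepended as a prefix token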
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_UpperCamelCase : List[str] = False
try:
_UpperCamelCase : Dict = _is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class a :
def __init__( self , _lowerCamelCase = None , _lowerCamelCase = [] ):
lowercase = 0
lowercase = choices
lowercase = prompt
if sys.platform == "win32":
lowercase = '*'
else:
lowercase = '➔ '
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 3_2 , _lowercase )
else:
forceWrite(self.choices[index] , _lowercase )
def UpperCamelCase_ ( self , _lowerCamelCase ):
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(_lowercase )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = 1 ):
lowercase = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(_lowercase )
move_cursor(_lowercase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['up'] )
def UpperCamelCase_ ( self ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP['down'] )
def UpperCamelCase_ ( self ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['newline'] )
def UpperCamelCase_ ( self ):
move_cursor(len(self.choices ) - self.position , 'DOWN' )
return self.position
@input.mark(KEYMAP['interrupt'] )
def UpperCamelCase_ ( self ):
move_cursor(len(self.choices ) - self.position , 'DOWN' )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(1_0 )] )
def UpperCamelCase_ ( self ):
lowercase = int(chr(self.current_selection ) )
lowercase = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , _lowercase )
else:
return
else:
return
def UpperCamelCase_ ( self , _lowerCamelCase = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , '\n' )
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
else:
forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
lowercase = default_choice
for i in range(len(self.choices ) ):
self.print_choice(_lowercase )
forceWrite('\n' )
move_cursor(len(self.choices ) - self.position , 'UP' )
with cursor.hide():
while True:
if in_colab:
try:
lowercase = int(builtins.input() )
except ValueError:
lowercase = default_choice
else:
lowercase = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , 'UP' )
clear_line()
                self.write_choice(choice , '\n' )
return choice
| 220 |
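# Example (sketch): how accelerate drives this selector. Upstream the class is
# named BulletMenu (its name is obfuscated above), so that name — and the
# prompt text — are assumptions, not confirmed by this file.
menu = BulletMenu("Which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"])
selected_index = menu.run(default_choice=0)  # blocks on arrow/number keys, returns the chosen index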
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest(nn.Module ):
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class MemoryTest(unittest.TestCase ):
    def test_memory_implicit( self ):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(batch_size ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8] )
    def test_memory_explicit( self ):
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(batch_size , arga ):
            nonlocal batch_sizes
            batch_sizes.append(batch_size )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga
        bs , arga = mock_training_loop_function('''hello''' )
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8] )
        self.assertListEqual([bs, arga] , [8, '''hello'''] )
    def test_start_zero( self ):
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(batch_size ):
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
    def test_approach_zero( self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
    def test_verbose_guard( self ):
        @find_executable_batch_size(starting_batch_size=1_28 )
        def mock_training_loop_function(batch_size , arg1 , arg2 ):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError ) as cm:
            mock_training_loop_function(1_28 , '''hello''' , '''world''' )
        self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
    def test_any_other_error( self ):
        @find_executable_batch_size(starting_batch_size=16 )
        def mock_training_loop_function(batch_size ):
            raise ValueError('''Oops, we had an error!''' )
        with self.assertRaises(ValueError ) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
    @require_cuda
    def test_release_memory( self ):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
        model = release_memory(model )
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
| 332 | 0 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCAmelCase = ['text', 'image', 'audio']
def snake_case_ ( snake_case ) -> Dict:
lowercase__: Tuple = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_12, 5_12) ) )
elif input_type == "audio":
inputs.append(torch.ones(30_00 ) )
elif isinstance(snake_case_ , snake_case_ ):
inputs.append(create_inputs(snake_case_ ) )
else:
raise ValueError(f'Invalid type requested: {input_type}' )
return inputs
def snake_case_ ( snake_case ) -> Tuple:
lowercase__: int = []
for output in outputs:
if isinstance(snake_case_ , (str, AgentText) ):
output_types.append('text' )
elif isinstance(snake_case_ , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(snake_case_ , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(f'Invalid output: {output}' )
return output_types
@is_tool_test
class __a :
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , 'inputs' ) )
self.assertTrue(hasattr(self.tool , 'outputs' ) )
lowercase__: List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , _lowercase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase__: str = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: Any = create_inputs(self.tool.inputs )
lowercase__: Tuple = self.tool(*_lowercase )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase__: Dict = [outputs]
self.assertListEqual(output_types(_lowercase ) , self.tool.outputs )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , 'description' ) )
self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__: List[str] = create_inputs(self.tool.inputs )
lowercase__: List[Any] = self.tool(*_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase__: str = [outputs]
self.assertEqual(len(_lowercase ) , len(self.tool.outputs ) )
for output, output_type in zip(_lowercase , self.tool.outputs ):
lowercase__: Dict = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(_lowercase , _lowercase ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: Tuple = create_inputs(self.tool.inputs )
lowercase__: Optional[int] = []
for _input, input_type in zip(_lowercase , self.tool.inputs ):
if isinstance(_lowercase , _lowercase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase__: List[Any] = self.tool(*_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase__: Tuple = [outputs]
self.assertEqual(len(_lowercase ) , len(self.tool.outputs ) )
| 196 |
"""simple docstring"""
import argparse
import copy
def lowercase__ ( snake_case_ :Tuple ):
__UpperCAmelCase = {}
with open(snake_case_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
__UpperCAmelCase = []
_list.append([line.split()[1], line.split()[2]] )
__UpperCAmelCase = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
__UpperCAmelCase = []
_list.append([line.split()[0], line.split()[2]] )
__UpperCAmelCase = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def lowercase__ ( snake_case_ :Dict , snake_case_ :Optional[Any] ):
with open(snake_case_ ) as f:
__UpperCAmelCase = f.read(1 )
__UpperCAmelCase = start_node
__UpperCAmelCase = []
__UpperCAmelCase = start_node
__UpperCAmelCase = 0
while visiting not in first_solution:
__UpperCAmelCase = 10_000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(snake_case_ ) and k[0] not in first_solution:
__UpperCAmelCase = k[1]
__UpperCAmelCase = k[0]
first_solution.append(snake_case_ )
__UpperCAmelCase = distance_of_first_solution + int(snake_case_ )
__UpperCAmelCase = best_node
first_solution.append(snake_case_ )
__UpperCAmelCase = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
__UpperCAmelCase = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10_000
)
return first_solution, distance_of_first_solution
def lowercase__ ( snake_case_ :int , snake_case_ :Tuple ):
__UpperCAmelCase = []
for n in solution[1:-1]:
__UpperCAmelCase = solution.index(snake_case_ )
for kn in solution[1:-1]:
__UpperCAmelCase = solution.index(snake_case_ )
if n == kn:
continue
__UpperCAmelCase = copy.deepcopy(snake_case_ )
__UpperCAmelCase = kn
__UpperCAmelCase = n
__UpperCAmelCase = 0
for k in _tmp[:-1]:
__UpperCAmelCase = _tmp[_tmp.index(snake_case_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
__UpperCAmelCase = distance + int(i[1] )
_tmp.append(snake_case_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
__UpperCAmelCase = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Optional[int] , snake_case_ :Dict , snake_case_ :int ):
__UpperCAmelCase = 1
__UpperCAmelCase = first_solution
__UpperCAmelCase = []
__UpperCAmelCase = distance_of_first_solution
__UpperCAmelCase = solution
while count <= iters:
__UpperCAmelCase = find_neighborhood(snake_case_ , snake_case_ )
__UpperCAmelCase = 0
__UpperCAmelCase = neighborhood[index_of_best_solution]
__UpperCAmelCase = len(snake_case_ ) - 1
__UpperCAmelCase = False
while not found:
__UpperCAmelCase = 0
while i < len(snake_case_ ):
if best_solution[i] != solution[i]:
__UpperCAmelCase = best_solution[i]
__UpperCAmelCase = solution[i]
break
__UpperCAmelCase = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
__UpperCAmelCase = True
__UpperCAmelCase = best_solution[:-1]
__UpperCAmelCase = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
__UpperCAmelCase = cost
__UpperCAmelCase = solution
else:
__UpperCAmelCase = index_of_best_solution + 1
__UpperCAmelCase = neighborhood[index_of_best_solution]
if len(snake_case_ ) >= size:
tabu_list.pop(0 )
__UpperCAmelCase = count + 1
return best_solution_ever, best_cost
def lowercase__ ( snake_case_ :str=None ):
__UpperCAmelCase = generate_neighbours(args.File )
__UpperCAmelCase , __UpperCAmelCase = generate_first_solution(
args.File , snake_case_ )
__UpperCAmelCase , __UpperCAmelCase = tabu_search(
snake_case_ , snake_case_ , snake_case_ , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 332 | 0 |
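# Example (sketch): the whitespace-separated edge list the parser above expects,
# one "node_a node_b distance" triple per line; note that the start node is read
# from the very first character of the file. The file name and CLI line below
# are illustrative, not taken from this source.
with open("tsp_data.txt", "w") as f:
    f.write("a b 20\na c 18\na d 22\nb c 10\nb d 25\nc d 15\n")
# python tabu_search.py -f tsp_data.txt -i 100 -s 5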
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 312 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)
class SVC:
    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)
    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)
        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)
        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
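    # Minimal usage sketch (toy data assumed here, not part of the original file):
    # two linearly separable points labelled +1 / -1.
    xs = [np.asarray([1.0, 1.0]), np.asarray([-1.0, -1.0])]
    ys = np.asarray([1, -1])
    svc = SVC(regularization=10.0, kernel="linear")
    svc.fit(observations=xs, classes=ys)
    print(svc.predict(np.asarray([2.0, 2.0])))  # expected: 1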
| 332 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
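# Minimal usage sketch (the checkpoint name is an assumption, not part of this file):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog", "bird"])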
| 35 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 332 | 0 |
def equation(x: float) -> float:
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: f(a) and f(b) must have opposite signs
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6)) | 232 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)
    def match_in_pattern(self, char: str) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text(self, current_pos: int) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 332 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}
MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 1024,
}
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
LANGUAGES = {'mustc': MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'''<lang:{lang}>''' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''') for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)
    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang
    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str) -> Union[Dict, List]:
    with open(path, 'r') as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
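# Minimal usage sketch (checkpoint name taken from the vocab map above):
#
#   tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tokenizer("hello world").input_ids
#   tokenizer.decode(ids)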
| 333 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        clusters=None,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''')
        return resize(
            image, size=(size['height'], size['width']), resample=resample, data_format=data_format, **kwargs
        )
    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_normalize: Optional[bool] = None,
        do_color_quantize: Optional[bool] = None,
        clusters=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_color_quantize and clusters is None:
            raise ValueError('Clusters must be specified if do_color_quantize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'input_ids': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
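# Quick sanity sketch for the helpers above (toy values, assumed):
#
#   a = np.array([[10.0, 10.0, 10.0]])                 # one RGB pixel
#   clusters = np.array([[0, 0, 0], [255, 255, 255]])  # two palette entries
#   color_quantize(a, clusters)                        # -> array([0]): index of the nearest cluster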
| 333 | 1 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_compatible(self):
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_transformer_model_is_compatible(self):
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_transformer_model_is_not_compatible(self):
        filenames = [
            'safety_checker/pytorch_model.bin',
            'safety_checker/model.safetensors',
            'vae/diffusion_pytorch_model.bin',
            'vae/diffusion_pytorch_model.safetensors',
            'text_encoder/pytorch_model.bin',
            # Removed: 'text_encoder/model.safetensors',
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))
    def test_all_is_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'unet/diffusion_pytorch_model.bin',
            'unet/diffusion_pytorch_model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            'text_encoder/pytorch_model.fp16.bin',
            'text_encoder/model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            'text_encoder/pytorch_model.bin',
            'text_encoder/model.safetensors',
        ]
        variant = 'fp16'
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            'safety_checker/pytorch_model.fp16.bin',
            'safety_checker/model.fp16.safetensors',
            'vae/diffusion_pytorch_model.fp16.bin',
            'vae/diffusion_pytorch_model.fp16.safetensors',
            'text_encoder/pytorch_model.fp16.bin',
            # 'text_encoder/model.fp16.safetensors',
            'unet/diffusion_pytorch_model.fp16.bin',
            'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = 'fp16'
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 333 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_poolformer': [
        'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'PoolFormerConfig',
        'PoolFormerOnnxConfig',
    ]
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_poolformer'] = [
        'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PoolFormerForImageClassification',
        'PoolFormerModel',
        'PoolFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 333 | 1 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace('encoder.model', 'encoder')
    if "decoder.model" in name:
        name = name.replace('decoder.model', 'decoder')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if name.startswith('encoder'):
        if "layers" in name:
            name = 'encoder.' + name
        if "attn.proj" in name:
            name = name.replace('attn.proj', 'attention.output.dense')
        if "attn" in name and "mask" not in name:
            name = name.replace('attn', 'attention.self')
        if "norm1" in name:
            name = name.replace('norm1', 'layernorm_before')
        if "norm2" in name:
            name = name.replace('norm2', 'layernorm_after')
        if name == "encoder.norm.weight":
            name = 'encoder.layernorm.weight'
        if name == "encoder.norm.bias":
            name = 'encoder.layernorm.bias'
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()
    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # verify results on scanned document
    dataset = load_dataset('hf-internal-testing/example-documents')
    image = dataset['test'][0]['image'].convert('RGB')
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors='pt').pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        question = 'When is the coffee break?'
        task_prompt = task_prompt.replace('{user_input}', question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = '<s_rvlcdip>'
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = '<s_cord>'
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = 's_cord-v2>'
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = '<s_zhtrainticket>'
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = 'hello world'
    else:
        raise ValueError('Model name not supported')
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors='pt')[
        'input_ids'
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)
    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub('nielsr/' + model_name.split('/')[-1], commit_message='Update model')
        processor.push_to_hub('nielsr/' + model_name.split('/')[-1], commit_message='Update model')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
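# Example invocation (paths and flags assumed):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa \
#       --push_to_hub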
| 333 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
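    # Worked example (assumed values): 100 VA of apparent power at power factor 0.9
    # gives real power 100 * 0.9 = 90.0 W and reactive power 100 * sqrt(1 - 0.81) ≈ 43.59 VAR.
    print(real_power(100, 0.9))
    print(reactive_power(100, 0.9))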
| 333 | 1 |
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 333 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    # ensure every row and every column is sorted in decreasing order
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit
    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f'''{func}(grid=grid)''', setup=setup, number=500)
        print(f'''{func}() took {time:0.4f} seconds''')
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 333 | 1 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)
    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]
    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description='Training')
    parser.add_argument('--force', action='store_true', help='Overwrite dump_path if it already exists.')
    parser.add_argument(
        '--dump_path', type=str, required=True, help='The output directory (log, checkpoints, parameters, etc.)')
    parser.add_argument(
        '--data_file', type=str, required=True, help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.', )
    parser.add_argument(
        '--student_type', type=str, choices=['distilbert', 'roberta', 'gpt2'], required=True, help='The student type (DistilBERT, RoBERTa).', )
    parser.add_argument('--student_config', type=str, required=True, help='Path to the student configuration.')
    parser.add_argument(
        '--student_pretrained_weights', default=None, type=str, help='Load student initialization checkpoint.')
    parser.add_argument(
        '--teacher_type', choices=['bert', 'roberta', 'gpt2'], required=True, help='Teacher type (BERT, RoBERTa).')
    parser.add_argument('--teacher_name', type=str, required=True, help='The teacher model.')
    parser.add_argument('--temperature', default=2.0, type=float, help='Temperature for the softmax temperature.')
    parser.add_argument(
        '--alpha_ce', default=0.5, type=float, help='Linear weight for the distillation loss. Must be >=0.')
    parser.add_argument(
        '--alpha_mlm', default=0.0, type=float, help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.', )
    parser.add_argument('--alpha_clm', default=0.5, type=float, help='Linear weight for the CLM loss. Must be >=0.')
    parser.add_argument('--alpha_mse', default=0.0, type=float, help='Linear weight of the MSE loss. Must be >=0.')
    parser.add_argument(
        '--alpha_cos', default=0.0, type=float, help='Linear weight of the cosine embedding loss. Must be >=0.')
    parser.add_argument(
        '--mlm', action='store_true', help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.')
    parser.add_argument(
        '--mlm_mask_prop', default=0.15, type=float, help='Proportion of tokens for which we need to make a prediction.', )
    parser.add_argument('--word_mask', default=0.8, type=float, help='Proportion of tokens to mask out.')
    parser.add_argument('--word_keep', default=0.1, type=float, help='Proportion of tokens to keep.')
    parser.add_argument('--word_rand', default=0.1, type=float, help='Proportion of tokens to randomly replace.')
    parser.add_argument(
        '--mlm_smoothing', default=0.7, type=float, help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).', )
    parser.add_argument('--token_counts', type=str, help='The token counts in the data_file for MLM.')
    parser.add_argument(
        '--restrict_ce_to_mask', action='store_true', help='If true, compute the distillation loss only the [MLM] prediction distribution.', )
    parser.add_argument(
        '--freeze_pos_embs', action='store_true', help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.', )
    parser.add_argument(
        '--freeze_token_type_embds', action='store_true', help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.', )
    parser.add_argument('--n_epoch', type=int, default=3, help='Number of pass on the whole dataset.')
    parser.add_argument('--batch_size', type=int, default=5, help='Batch size (for each process).')
    parser.add_argument(
        '--group_by_size', action='store_false', help='If true, group sequences that have similar length into the same batch. Default is true.', )
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=50, help='Gradient accumulation for larger training batches.', )
    parser.add_argument('--warmup_prop', default=0.05, type=float, help='Linear warmup proportion.')
    parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
    parser.add_argument('--learning_rate', default=5e-4, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--adam_epsilon', default=1e-6, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', default=5.0, type=float, help='Max gradient norm.')
    parser.add_argument('--initializer_range', default=0.02, type=float, help='Random initialization range.')
    parser.add_argument(
        '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit', )
    parser.add_argument(
        '--fp16_opt_level', type=str, default='O1', help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ), )
    parser.add_argument('--n_gpu', type=int, default=1, help='Number of GPUs in the node.')
    parser.add_argument('--local_rank', type=int, default=-1, help='Distributed training - Local rank')
    parser.add_argument('--seed', type=int, default=56, help='Random seed')
    parser.add_argument('--log_interval', type=int, default=500, help='Tensorboard logging interval.')
    parser.add_argument('--checkpoint_interval', type=int, default=4000, help='Checkpoint interval.')
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f'''Serialization dir {args.dump_path} already exists, but you have not specified whether to'''
                    ''' overwrite it. Use `--force` if you want to overwrite it.''')
            else:
                shutil.rmtree(args.dump_path)
        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''')
        # SAVE PARAMS #
        logger.info(f'''Param: {args}''')
        with open(os.path.join(args.dump_path, '''parameters.json'''), '''w''') as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f'''Special tokens {special_tok_ids}''')
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(f'''Loading data from {args.data_file}''')
    with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
    if args.mlm:
        logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''')
        with open(args.token_counts, '''rb''') as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info('''Data loader created.''')
    # STUDENT #
    logger.info(f'''Loading student config from {args.student_config}''')
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''')
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
    if args.n_gpu > 0:
        student.to(f'''cuda:{args.local_rank}''')
    logger.info('''Student loaded.''')
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f'''cuda:{args.local_rank}''')
    logger.info(f'''Teacher loaded from {args.teacher_name}.''')
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info('''Let\'s go get some drinks.''')
if __name__ == "__main__":
main()
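# Example launch (paths are assumptions; mirrors the style of the distillation project's README):
#
#   python train.py --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased --mlm \
#       --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0 --mlm_mask_prop 0.15 \
#       --dump_path serialization_dir/my_first_training \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle --force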
| 333 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
A_ : List[str] = sys.version_info >= (3, 10)
def __a ( SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> str:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE )
@dataclass
class A_ :
'''simple docstring'''
a__ = 42
a__ = 42
a__ = 42
a__ = 42
@dataclass
class A_ :
'''simple docstring'''
a__ = 42
a__ = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class A_ :
'''simple docstring'''
a__ = False
a__ = True
a__ = None
class A_ ( _a ):
'''simple docstring'''
a__ = "titi"
a__ = "toto"
class A_ ( _a ):
'''simple docstring'''
a__ = "titi"
a__ = "toto"
a__ = 42
@dataclass
class A_ :
'''simple docstring'''
a__ = "toto"
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = BasicEnum(self.foo )
@dataclass
class A_ :
'''simple docstring'''
a__ = "toto"
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = MixedTypeEnum(self.foo )
@dataclass
class A_ :
'''simple docstring'''
a__ = None
a__ = field(default=_a , metadata={"help": "help message"} )
a__ = None
a__ = list_field(default=[] )
a__ = list_field(default=[] )
@dataclass
class A_ :
'''simple docstring'''
a__ = list_field(default=[] )
a__ = list_field(default=[1, 2, 3] )
a__ = list_field(default=["Hallo", "Bonjour", "Hello"] )
a__ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A_ :
'''simple docstring'''
a__ = field()
a__ = field()
a__ = field()
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = BasicEnum(self.required_enum )
@dataclass
class A_ :
'''simple docstring'''
a__ = 42
a__ = field()
a__ = None
a__ = field(default="toto" , metadata={"help": "help message"} )
a__ = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class A_ :
'''simple docstring'''
a__ = False
a__ = True
a__ = None
@dataclass
class A_ :
'''simple docstring'''
a__ = None
a__ = field(default=_a , metadata={"help": "help message"} )
a__ = None
a__ = list_field(default=[] )
a__ = list_field(default=[] )
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self , lowercase__ , lowercase__ ) -> Optional[int]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
__UpperCAmelCase = {k: v for k, v in vars(lowercase__ ).items() if k != '''container'''}
__UpperCAmelCase = {k: v for k, v in vars(lowercase__ ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , lowercase__ ) and yy.get('''choices''' , lowercase__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](lowercase__ ) , yy['''type'''](lowercase__ ) )
del xx["type"], yy["type"]
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--bar''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--baz''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--flag''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((__UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ )
self.assertFalse(example.flag )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
expected.add_argument('''--baz''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counterpart
        # and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowercase__ , dest='''baz''' )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
__UpperCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
__UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCAmelCase_ (self ) -> str:
@dataclass
class A_ :
'''simple docstring'''
a__ = "toto"
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowercase__ )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(
lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
__UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--bar''' , default=lowercase__ , type=lowercase__ , help='''help message''' )
expected.add_argument('''--baz''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowercase__ )
__UpperCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
__UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--required_str''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
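        # parse_dict should round-trip this mapping into an equivalent BasicExample instance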
__UpperCAmelCase = parser.parse_dict(lowercase__ )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_json''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(lowercase__ , lowercase__ )
            __UpperCAmelCase = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_yaml''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.assertIsNotNone(lowercase__ )
| 333 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
A_ : Union[str, Any] = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path , pytorch_dump_folder_path ) -> Optional[Any]:
    '''simple docstring'''
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ['''key_proj''', '''value_proj''', '''query_proj''']
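    # rename table: old fairseq-style submodule names -> new Hugging Face module names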
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
        attributes = key.split('''.''' )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
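        # walk the dotted attribute path on both models, renaming via `mapping`,
        # until we reach a parameter whose weights can be copied over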
for attribute in attributes:
if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
                is_key_init = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
logger.info(f'''{attribute} is initialized''' )
                is_key_init = True
break
elif attribute in special_keys and hasattr(SCREAMING_SNAKE_CASE , '''in_proj_weight''' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(old_attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(f'''{old_model} does not have {old_attribute}''' )
                    old_model = getattr(old_model , old_attribute )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A_ : int = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 333 |
import doctest
from collections import deque
import numpy as np
class A_ :
'''simple docstring'''
def __init__(self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
def lowerCAmelCase_ (self ) -> list[float]:
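        # Circular convolution: build a circulant matrix from the (zero-padded)
        # second signal, then multiply it with the first signal.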
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 333 | 1 |
def __a ( a , b ) -> bool:
    '''simple docstring'''
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
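    # dp[i][j]: the first i characters of a can be abbreviated to the first j
    # characters of b (lowercase letters of a may be dropped or capitalized)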
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[Any] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class A_ ( _a ):
'''simple docstring'''
a__ = "pegasus"
a__ = ["past_key_values"]
a__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self , vocab_size=50_265 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ) -> str:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
@property
    def num_attention_heads(self ) -> int:
return self.encoder_attention_heads
@property
    def hidden_size(self ) -> int:
return self.d_model
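# Minimal usage sketch (illustrative, not part of the original module):
#   config = PegasusConfig()       # defaults above
#   config.hidden_size             # 1024, alias of d_model
#   config.num_attention_heads     # 16, alias of encoder_attention_heads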
| 333 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class A_ ( _a ):
'''simple docstring'''
a__ = ["image_processor", "tokenizer"]
a__ = "BlipImageProcessor"
a__ = "AutoTokenizer"
def __init__(self , lowercase__ , lowercase__ , lowercase__ ) -> Any:
super().__init__(lowercase__ , lowercase__ )
# add QFormer tokenizer
__UpperCAmelCase = qformer_tokenizer
def __call__(self , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = 0 , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = True , lowercase__ = None , **lowercase__ , ) -> BatchFeature:
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
__UpperCAmelCase = BatchFeature()
if text is not None:
__UpperCAmelCase = self.tokenizer(
text=lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , stride=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=lowercase__ , return_overflowing_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , return_offsets_mapping=lowercase__ , return_token_type_ids=lowercase__ , return_length=lowercase__ , verbose=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
encoding.update(lowercase__ )
            qformer_text_encoding = self.qformer_tokenizer(
text=lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , stride=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=lowercase__ , return_overflowing_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , return_offsets_mapping=lowercase__ , return_token_type_ids=lowercase__ , return_length=lowercase__ , verbose=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
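            # keep the Q-Former text inputs under dedicated keys so they do not
            # overwrite the main tokenizer's input_ids / attention_mask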
            encoding['''qformer_input_ids'''] = qformer_text_encoding.pop('''input_ids''' )
            encoding['''qformer_attention_mask'''] = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
__UpperCAmelCase = self.image_processor(lowercase__ , return_tensors=lowercase__ )
encoding.update(lowercase__ )
return encoding
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> Dict:
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> Optional[Any]:
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase_ (self ) -> Union[str, Any]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCAmelCase_ (self , lowercase__ , **lowercase__ ) -> Union[str, Any]:
if os.path.isfile(lowercase__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(lowercase__ , exist_ok=lowercase__ )
__UpperCAmelCase = os.path.join(lowercase__ , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(lowercase__ )
return super().save_pretrained(lowercase__ , **lowercase__ )
@classmethod
def lowerCAmelCase_ (cls , lowercase__ , **lowercase__ ) -> Union[str, Any]:
__UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase__ , subfolder='''qformer_tokenizer''' )
__UpperCAmelCase = cls._get_arguments_from_pretrained(lowercase__ , **lowercase__ )
args.append(lowercase__ )
return cls(*lowercase__ )
| 333 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
'''simple docstring'''
a__ = LongformerTokenizer
a__ = True
a__ = LongformerTokenizerFast
a__ = True
def lowerCAmelCase_ (self ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
__UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
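        # minimal BPE merge rules used together with the toy vocabulary above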
__UpperCAmelCase = {'''unk_token''': '''<unk>'''}
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase__ ) )
def lowerCAmelCase_ (self , **lowercase__ ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> Dict:
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = '''lower newer'''
return input_text, output_text
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__UpperCAmelCase = tokenizer.tokenize(lowercase__ ) # , add_prefix_space=True)
self.assertListEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokens + [tokenizer.unk_token]
__UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = '''Encode this sequence.'''
__UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase__ , lowercase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
# Testing spaces after special tokens
__UpperCAmelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ )
__UpperCAmelCase = '''Encode <mask> sequence'''
__UpperCAmelCase = '''Encode <mask>sequence'''
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
pass
def lowerCAmelCase_ (self ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = '''A, <mask> AllenNLP sentence.'''
__UpperCAmelCase = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
__UpperCAmelCase = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCAmelCase_ (self ) -> Optional[int]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                text_of_1_token = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'''{text_of_1_token} {text_of_1_token}'''
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
                text = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
| 333 | 1 |
def matching_min_vertex_cover( graph ) -> set:
    '''simple docstring'''
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
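    # Greedy matching-based 2-approximation: both endpoints of every picked edge
    # go into the cover, so the result is at most twice the optimal size.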
    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
return chosen_vertices
def get_edges( graph ) -> set:
    '''simple docstring'''
    edges = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 333 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
'''simple docstring'''
a__ = (IPNDMScheduler,)
a__ = (("num_inference_steps", 50),)
def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
        config = {'''num_train_timesteps''': 1_000}
config.update(**lowercase__ )
return config
def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Any:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config(**lowercase__ )
__UpperCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__UpperCAmelCase = dummy_past_residuals[:]
if time_step is None:
__UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ )
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Optional[int]:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCAmelCase = dummy_past_residuals[:]
if time_step is None:
__UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residual (must be after setting timesteps)
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ (self , **lowercase__ ) -> List[Any]:
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config(**lowercase__ )
__UpperCAmelCase = scheduler_class(**lowercase__ )
__UpperCAmelCase = 10
__UpperCAmelCase = self.dummy_model()
__UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase__ )
for i, t in enumerate(scheduler.timesteps ):
__UpperCAmelCase = model(lowercase__ , lowercase__ )
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
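        # a second pass over the timesteps exercises the multistep scheduler with
        # its residual history already populated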
for i, t in enumerate(scheduler.timesteps ):
__UpperCAmelCase = model(lowercase__ , lowercase__ )
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
return sample
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase__ , '''set_timesteps''' ):
scheduler.set_timesteps(lowercase__ )
elif num_inference_steps is not None and not hasattr(lowercase__ , '''set_timesteps''' ):
__UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.timesteps[5]
__UpperCAmelCase = scheduler.timesteps[6]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase_ (self ) -> List[Any]:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase__ , time_step=lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase__ , time_step=lowercase__ )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = self.full_loop()
__UpperCAmelCase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 333 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
A_ : Any = logging.getLogger(__name__)
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30522, type=int)
A_ : Union[str, Any] = parser.parse_args()
logger.info(F"""Loading data from {args.data_file}""")
with open(args.data_file, 'rb') as fp:
A_ : Any = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
A_ : Dict = Counter()
for tk_ids in data:
counter.update(tk_ids)
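    # densify into a list indexed by token id; ids never seen in the corpus keep count 0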
A_ : List[Any] = [0] * args.vocab_size
for k, v in counter.items():
A_ : Tuple = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 333 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=0.1 , lowercase__=0.1 , lowercase__=224 , lowercase__=1_000 , lowercase__=[3, 3, 6, 4] , lowercase__=[48, 56, 112, 220] , ) -> int:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = num_labels
__UpperCAmelCase = image_size
__UpperCAmelCase = layer_depths
__UpperCAmelCase = embed_dims
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ (self ) -> Optional[Any]:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase__ , layer_scale_init_value=1E-5 , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> int:
__UpperCAmelCase = SwiftFormerModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ (self ) -> Optional[int]:
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = self.prepare_config_and_inputs()
__UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
'''simple docstring'''
a__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
a__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = SwiftFormerModelTester(self )
__UpperCAmelCase = ConfigTester(
self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCAmelCase_ (self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowerCAmelCase_ (self ) -> List[Any]:
pass
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@slow
def lowerCAmelCase_ (self ) -> Any:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = SwiftFormerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self ) -> Union[str, Any]:
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
__UpperCAmelCase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states ) , expected_num_stages )  # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowercase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
def _config_zero_init(lowercase__ ):
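            # shrink every *_range / *_std / layer_scale config value to ~1e-10 so that
            # any properly initialized parameter ends up with mean ~0.0 or ~1.0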
__UpperCAmelCase = copy.deepcopy(lowercase__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowercase__ , lowercase__ , 1E-10 )
if isinstance(getattr(lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ):
__UpperCAmelCase = _config_zero_init(getattr(lowercase__ , lowercase__ ) )
setattr(lowercase__ , lowercase__ , lowercase__ )
return configs_no_init
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = _config_zero_init(lowercase__ )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(config=lowercase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase_ (self ) -> Optional[Any]:
pass
def __a ( ) -> Any:
'''simple docstring'''
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ (self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(lowercase__ )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
__UpperCAmelCase = model(**lowercase__ )
# verify the logits
__UpperCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase__ )
__UpperCAmelCase = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
| 333 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ : int = logging.get_logger(__name__)
A_ : str = {'tokenizer_file': 'tokenizer.json'}
A_ : List[str] = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class A_ ( _a ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = ["input_ids", "attention_mask"]
a__ = None
    def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **kwargs , ) -> Dict:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
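        # the serialized backend pre-tokenizer may disagree with the requested
        # `add_prefix_space`; if so, rebuild it with the requested value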
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> BatchEncoding:
__UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowercase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> BatchEncoding:
__UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowercase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._encode_plus(*lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
__UpperCAmelCase = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> List[int]:
__UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] )
if len(lowercase__ ) > self.model_max_length:
__UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
| 333 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
A_ : str = logging.get_logger(__name__)
A_ : str = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
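
# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# How the auto classes above are typically exercised; "bert-base-cased" is an
# assumed example checkpoint and the Flax extras must be installed.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    inputs = tokenizer("Hello, world!", return_tensors="np")
    outputs = model(**inputs)
    print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)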
| 333 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
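
# --- Hedged usage note (added for illustration) ---
# Typical invocations through the standard `accelerate` entry point:
#   accelerate config                         # interactive prompts, saved to the default location
#   accelerate config --config_file my.yaml   # save the answers to an explicit file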
| 333 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant multipliers from a rule string such as "1:10,0.1:20,0.01" (value:step pairs, last entry is the final multiplier)."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Cosine schedule with several hard restarts, after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Polynomial decay from the optimizer's initial lr down to `lr_end`, after a linear warmup."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that retrieves any of the schedulers above by name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
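
# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Wiring a linear warmup/decay schedule to an optimizer; the tiny model and the
# step counts are assumptions chosen for the demo.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(5):
        optimizer.step()
        scheduler.step()
    print(scheduler.get_last_lr())  # lr is still ramping up during warmup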
| 333 | 1 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
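
# --- Hedged usage note (added for illustration) ---
# Typically run from the repository root (the script path below is an assumption):
#   python scripts/validate_filenames.py
# The process exits with a non-zero status whenever any offending files exist,
# which makes the check suitable for CI.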
| 333 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate, at x0, the polynomial through the given points using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
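
    # --- Hedged usage sketch (added for illustration) ---
    # For points on the line y = 2x, the interpolated value at 2.5 should be 5.0.
    value, table = neville_interpolate([1, 2, 3, 4], [2, 4, 6, 8], 2.5)
    print(value)  # 5.0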
| 333 | 1 |
class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Bumps duplicate edge weights so that all weights become distinct."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Boruvka's algorithm: returns a minimum spanning tree (assumes distinct edge weights)."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]

            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
            num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
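
# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The vertex/edge lists below are illustrative assumptions.
if __name__ == "__main__":
    g = Graph.build(
        vertices=[1, 2, 3, 4],
        edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (4, 1, 2), (1, 3, 3)],
    )
    g.distinct_weight()  # Boruvka as written assumes distinct edge weights
    print(Graph.boruvka_mst(g))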
| 333 |
def matching_min_vertex_cover(graph: dict) -> set:
    """APX approximation algorithm for minimum vertex cover, based on a maximal matching."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 333 | 1 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
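
# --- Hedged usage note (added for illustration) ---
# These tests are typically run with pytest from the transformers repo root,
# e.g. (the file path is an assumption):
#   python -m pytest tests/test_configuration_utils.py
# The push-to-hub tests additionally require a token for the staging endpoint.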
| 333 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
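    # For the graph above this prints ['c', 'd', 'e', 'b', 'a']: each vertex is
    # appended only after all of its outgoing neighbors, so dependencies come first.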
| 333 | 1 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index,
    label_map=None,
    reduce_labels=False,
):
    """Calculate intersection and union between a prediction and a ground-truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    label_map=None,
    reduce_labels=False,
):
    """Accumulate intersections and unions over a whole batch of segmentation maps."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    nan_to_num=None,
    label_map=None,
    reduce_labels=False,
):
    """Compute mean IoU, mean accuracy and overall accuracy over all categories."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
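
# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Calling the module-level helper directly on two tiny segmentation maps.
if __name__ == "__main__":
    pred = [np.array([[0, 1], [1, 1]])]
    gt = [np.array([[0, 1], [0, 1]])]
    print(mean_iou(pred, gt, num_labels=2, ignore_index=255))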
| 333 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
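
# --- Hedged usage note (added for illustration) ---
# With the lazy module registered, end users import the public names directly:
#   from transformers import GraphormerConfig, GraphormerModel
# The heavy torch-backed submodules are only imported on first attribute access.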
| 333 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
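
# --- Hedged usage note (added for illustration) ---
# Typical invocation (the paths are illustrative assumptions):
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_knowledge_dataset.csv \
#       --output_dir path/to/my_knowledge_dataset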
| 333 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        # Perform our own scaled accumulation
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
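

# A minimal, self-contained sketch of the user-facing pattern these tests verify (an
# illustrative addition, not part of the original script). It assumes a single-process
# run and that `RegressionDataset` batches expose "x" and "y" keys; the helper is
# defined but intentionally never called here.
def _accumulate_demo():
    demo_accelerator = Accelerator(gradient_accumulation_steps=2)
    demo_model = RegressionModel()
    demo_opt = AdamW(demo_model.parameters(), lr=1e-3)
    demo_loader = DataLoader(RegressionDataset(length=16), batch_size=4)
    demo_model, demo_opt, demo_loader = demo_accelerator.prepare(demo_model, demo_opt, demo_loader)
    for batch in demo_loader:
        # Inside `accumulate`, gradient synchronization and the loss scaling by
        # `gradient_accumulation_steps` are handled by the accelerator.
        with demo_accelerator.accumulate(demo_model):
            loss = F.mse_loss(demo_model(batch["x"]), batch["y"])
            demo_accelerator.backward(loss)
            demo_opt.step()
            demo_opt.zero_grad()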
if __name__ == "__main__":
main()
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
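

# Quick illustrative check (an addition, not part of the original metric module): a
# perfectly linear relationship yields a coefficient of 1.0.
def _pearsonr_demo() -> float:
    return float(pearsonr([1, 2, 3], [2, 4, 6])[0])  # expected: 1.0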
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig(PretrainedConfig):
    """Configuration class to store the configuration of a DPR model."""

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
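

# Minimal usage sketch (an illustrative addition; assumes this module is importable as
# part of transformers): instantiate the config with defaults and one override.
def _dpr_config_demo() -> DPRConfig:
    return DPRConfig(projection_dim=128)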
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing each of them in a grid (whose
    height depends on the key) in a zigzag formation and reading it left to right."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on the key, fills it in with the characters of the
    input string, and then reads it in a zigzag formation."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
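

def _rail_fence_round_trip_demo() -> bool:
    # Illustrative round-trip check (an addition, not part of the original module):
    # decrypting an encryption with the same key must reproduce the plaintext. The
    # sample string is an assumption chosen for the demo.
    sample = "WEAREDISCOVEREDFLEEATONCE"
    return decrypt(encrypt(sample, 4), 4) == sample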
if __name__ == "__main__":
import doctest
doctest.testmod()
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):  # original test name not recoverable from the obfuscated source
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Find the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
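

def _get_mid_demo() -> tuple[float, float]:
    # Worked example (an illustrative addition): the midpoint of (0, 0) and (4, 2)
    # is (2.0, 1.0).
    return get_mid((0, 0), (4, 2))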
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" Bloom tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
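

# Hedged usage sketch (an illustrative addition; it downloads a real checkpoint, so it
# is defined but intentionally never called here):
def _bloom_tokenizer_demo():
    tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
    return tokenizer("Hello world")["input_ids"]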
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to `number`,
    using bottom-up dynamic programming.
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0  # base case: zero squares are needed to represent 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
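

def _squares_demo() -> int:
    # Worked example (an illustrative addition): 12 = 4 + 4 + 4, so the DP returns 3.
    return minimum_squares_to_represent_a_number(12)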
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample will be retrieved by indexing the list of token_ids and their corresponding lengths.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
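
    # Usage note (an illustrative addition, not from the original file):
    # `batch_sequences` is designed to be passed as the `collate_fn` of a PyTorch
    # DataLoader built on this dataset, e.g.
    # DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences).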
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    """Constructs an ImageGPT image processor: images are resized to a smaller
    resolution, normalized to [-1, 1], and color-quantized against a palette of
    clusters."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_normalize: Optional[bool] = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
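

# Hedged usage sketch (an illustrative addition; the toy 8-color palette below is an
# assumption for the demo, not the real ImageGPT clusters):
def _imagegpt_processor_demo():
    toy_clusters = np.random.uniform(-1, 1, size=(8, 3))
    processor = ImageGPTImageProcessor(clusters=toy_clusters, size={"height": 32, "width": 32})
    image = np.random.randint(0, 256, size=(3, 64, 64), dtype=np.uint8)
    # One image quantized to 32 * 32 = 1024 palette indices.
    return processor(images=image, return_tensors="np")["input_ids"].shape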
def binary_multiply(a: int, b: int) -> int:
    """
    Multiply `a` and `b` with bitwise shift-and-add (binary multiplication).
    """
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """
    Shift-and-add multiplication of `a` and `b`, reduced modulo `modulus` at each step.
    """
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus

        a += a
        b >>= 1

    return res
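

def _binary_multiply_demo() -> None:
    # Sanity sketch (an illustrative addition): shift-and-add multiplication agrees
    # with `*`, and the modular variant with `% modulus`.
    assert binary_multiply(13, 11) == 13 * 11
    assert binary_mod_multiply(13, 11, 7) == (13 * 11) % 7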
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
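
# Usage note (an illustrative addition, not from the original file): with this lazy
# structure, `from transformers.models.poolformer import PoolFormerConfig` only pulls
# in the heavy torch/vision submodules on first attribute access.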
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """
    Return a dict of current worldwide COVID-19 statistics scraped from worldometers.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
print(F"""{key}\n{value}\n""")
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate real power from apparent power and power factor.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
if (
not isinstance(SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
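    # Worked example (hedged addition): a purely resistive load (power factor
    # 1.0) dissipates everything as real power and nothing as reactive power.
    assert real_power(100, 1.0) == 100.0
    assert reactive_power(100, 1.0) == 0.0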
| 333 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = SamImageProcessor()
__UpperCAmelCase = SamProcessor(lowercase__ )
processor.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ (self , **lowercase__ ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase__ ).image_processor
def lowerCAmelCase_ (self ) -> str:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase_ (self ) -> Optional[int]:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase = self.get_image_processor(do_normalize=lowercase__ , padding_value=1.0 )
__UpperCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' )  # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_torch
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = SamProcessor(image_processor=lowercase__ )
__UpperCAmelCase = [torch.ones((1, 3, 5, 5) )]
__UpperCAmelCase = [[1_764, 2_646]]
__UpperCAmelCase = [[683, 1_024]]
__UpperCAmelCase = processor.post_process_masks(lowercase__ , lowercase__ , lowercase__ )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
__UpperCAmelCase = processor.post_process_masks(
lowercase__ , torch.tensor(lowercase__ ) , torch.tensor(lowercase__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
# should also work with np
__UpperCAmelCase = [np.ones((1, 3, 5, 5) )]
__UpperCAmelCase = processor.post_process_masks(lowercase__ , np.array(lowercase__ ) , np.array(lowercase__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
__UpperCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(lowercase__ ):
__UpperCAmelCase = processor.post_process_masks(lowercase__ , np.array(lowercase__ ) , np.array(lowercase__ ) )
@require_vision
@require_tf
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = SamImageProcessor()
__UpperCAmelCase = SamProcessor(lowercase__ )
processor.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ (self , **lowercase__ ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase__ ).image_processor
def lowerCAmelCase_ (self ) -> Any:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase = self.get_image_processor(do_normalize=lowercase__ , padding_value=1.0 )
__UpperCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=lowercase__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase__ )
def lowerCAmelCase_ (self ) -> Dict:
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
@require_tf
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = SamProcessor(image_processor=lowercase__ )
__UpperCAmelCase = [tf.ones((1, 3, 5, 5) )]
__UpperCAmelCase = [[1_764, 2_646]]
__UpperCAmelCase = [[683, 1_024]]
__UpperCAmelCase = processor.post_process_masks(lowercase__ , lowercase__ , lowercase__ , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
__UpperCAmelCase = processor.post_process_masks(
lowercase__ , tf.convert_to_tensor(lowercase__ ) , tf.convert_to_tensor(lowercase__ ) , return_tensors='''tf''' , )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
# should also work with np
__UpperCAmelCase = [np.ones((1, 3, 5, 5) )]
__UpperCAmelCase = processor.post_process_masks(
lowercase__ , np.array(lowercase__ ) , np.array(lowercase__ ) , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
__UpperCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__UpperCAmelCase = processor.post_process_masks(
lowercase__ , np.array(lowercase__ ) , np.array(lowercase__ ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = SamImageProcessor()
__UpperCAmelCase = SamProcessor(lowercase__ )
processor.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ (self , **lowercase__ ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase__ ).image_processor
def lowerCAmelCase_ (self ) -> str:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase_ (self ) -> Optional[Any]:
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
@is_pt_tf_cross_test
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = SamProcessor(image_processor=lowercase__ )
        __UpperCAmelCase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
__UpperCAmelCase = [tf.convert_to_tensor(lowercase__ )]
__UpperCAmelCase = [torch.tensor(lowercase__ )]
__UpperCAmelCase = [[1_764, 2_646]]
__UpperCAmelCase = [[683, 1_024]]
__UpperCAmelCase = processor.post_process_masks(
lowercase__ , lowercase__ , lowercase__ , return_tensors='''tf''' )
__UpperCAmelCase = processor.post_process_masks(
lowercase__ , lowercase__ , lowercase__ , return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = SamProcessor(image_processor=lowercase__ )
__UpperCAmelCase = self.prepare_image_inputs()
__UpperCAmelCase = image_processor(lowercase__ , return_tensors='''pt''' )['''pixel_values'''].numpy()
__UpperCAmelCase = processor(images=lowercase__ , return_tensors='''pt''' )['''pixel_values'''].numpy()
__UpperCAmelCase = image_processor(lowercase__ , return_tensors='''tf''' )['''pixel_values'''].numpy()
__UpperCAmelCase = processor(images=lowercase__ , return_tensors='''tf''' )['''pixel_values'''].numpy()
self.assertTrue(np.allclose(lowercase__ , lowercase__ ) )
self.assertTrue(np.allclose(lowercase__ , lowercase__ ) )
self.assertTrue(np.allclose(lowercase__ , lowercase__ ) )
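# Usage sketch (hedged, comments only; `facebook/sam-vit-base` is one public
# checkpoint): outside of tests the same pieces are combined like this.
#
#   from transformers import SamProcessor
#   processor = SamProcessor.from_pretrained('facebook/sam-vit-base')
#   inputs = processor(images=image, return_tensors='pt')
#   masks = processor.post_process_masks(
#       low_res_masks, inputs['original_sizes'], inputs['reshaped_input_sizes']
#   )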
| 333 |
def generate_large_matrix() -> list[list[int]]:
    '''Build a 1000 x 1000 grid whose rows and columns are sorted in
    decreasing order.'''
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    '''Check that every row and column of the grid is sorted in decreasing order.'''
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    '''Binary-search a decreasing row for the index of its first negative value.'''
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    '''Count negatives with one binary search per row, shrinking the bound.'''
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    '''Count negatives by scanning every cell.'''
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    '''Count negatives by scanning each row and breaking at the first one.'''
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    '''Benchmark the three counting strategies on the large grid.'''
    from timeit import timeit

    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f'{func}(grid=grid)', setup=setup, number=500)
        print(f'{func}() took {time:0.4f} seconds')
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
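    # Worked example (hedged addition): on the first small test grid the three
    # strategies must agree; the expected count of negatives is 8.
    example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert (
        count_negatives_binary_search(example)
        == count_negatives_brute_force(example)
        == count_negatives_brute_force_with_break(example)
        == 8
    )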
| 333 | 1 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class A_ ( _a ):
'''simple docstring'''
def __get__(self , lowercase__ , lowercase__=None ) -> Optional[Any]:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
__UpperCAmelCase = '''__cached_''' + self.fget.__name__
__UpperCAmelCase = getattr(lowercase__ , lowercase__ , lowercase__ )
if cached is None:
__UpperCAmelCase = self.fget(lowercase__ )
setattr(lowercase__ , lowercase__ , lowercase__ )
return cached
def __a ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def __a ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
if is_torch_fx_proxy(SCREAMING_SNAKE_CASE ):
return True
if is_torch_available():
import torch
if isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(SCREAMING_SNAKE_CASE , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(SCREAMING_SNAKE_CASE , (jnp.ndarray, Tracer) ):
return True
return isinstance(SCREAMING_SNAKE_CASE , np.ndarray )
def __a ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
return isinstance(SCREAMING_SNAKE_CASE , np.ndarray )
def __a ( SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
return _is_numpy(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
import torch
return isinstance(SCREAMING_SNAKE_CASE , torch.Tensor )
def __a ( SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
return False if not is_torch_available() else _is_torch(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
import torch
return isinstance(SCREAMING_SNAKE_CASE , torch.device )
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
import torch
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
return False
return isinstance(SCREAMING_SNAKE_CASE , torch.dtype )
def __a ( SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
import tensorflow as tf
return isinstance(SCREAMING_SNAKE_CASE , tf.Tensor )
def __a ( SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(SCREAMING_SNAKE_CASE , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(SCREAMING_SNAKE_CASE )
return type(SCREAMING_SNAKE_CASE ) == tf.Tensor
def __a ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(SCREAMING_SNAKE_CASE , jnp.ndarray )
def __a ( SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
return False if not is_flax_available() else _is_jax(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE , (dict, UserDict) ):
return {k: to_py_obj(SCREAMING_SNAKE_CASE ) for k, v in obj.items()}
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ):
return [to_py_obj(SCREAMING_SNAKE_CASE ) for o in obj]
elif is_tf_tensor(SCREAMING_SNAKE_CASE ):
return obj.numpy().tolist()
elif is_torch_tensor(SCREAMING_SNAKE_CASE ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(SCREAMING_SNAKE_CASE ):
return np.asarray(SCREAMING_SNAKE_CASE ).tolist()
elif isinstance(SCREAMING_SNAKE_CASE , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def __a ( SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE , (dict, UserDict) ):
return {k: to_numpy(SCREAMING_SNAKE_CASE ) for k, v in obj.items()}
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ):
return np.array(SCREAMING_SNAKE_CASE )
elif is_tf_tensor(SCREAMING_SNAKE_CASE ):
return obj.numpy()
elif is_torch_tensor(SCREAMING_SNAKE_CASE ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(SCREAMING_SNAKE_CASE ):
return np.asarray(SCREAMING_SNAKE_CASE )
else:
return obj
class A_ ( _a ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = fields(self )
# Safety and consistency checks
if not len(lowercase__ ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
__UpperCAmelCase = getattr(self , class_fields[0].name )
__UpperCAmelCase = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(lowercase__ ):
if isinstance(lowercase__ , lowercase__ ):
__UpperCAmelCase = first_field.items()
__UpperCAmelCase = True
else:
try:
__UpperCAmelCase = iter(lowercase__ )
__UpperCAmelCase = True
except TypeError:
__UpperCAmelCase = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowercase__ ):
if (
not isinstance(lowercase__ , (list, tuple) )
or not len(lowercase__ ) == 2
or not isinstance(element[0] , lowercase__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__UpperCAmelCase = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__UpperCAmelCase = element[1]
elif first_field is not None:
__UpperCAmelCase = first_field
else:
for field in class_fields:
__UpperCAmelCase = getattr(self , field.name )
if v is not None:
__UpperCAmelCase = v
def __delitem__(self , *lowercase__ , **lowercase__ ) -> Optional[int]:
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> Union[str, Any]:
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> str:
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> List[str]:
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__(self , lowercase__ ) -> Any:
if isinstance(lowercase__ , lowercase__ ):
__UpperCAmelCase = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__(self , lowercase__ , lowercase__ ) -> Any:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowercase__ , lowercase__ )
super().__setattr__(lowercase__ , lowercase__ )
def __setitem__(self , lowercase__ , lowercase__ ) -> Union[str, Any]:
# Will raise a KeyException if needed
super().__setitem__(lowercase__ , lowercase__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class A_ ( _a , _a ):
'''simple docstring'''
@classmethod
def lowerCAmelCase_ (cls , lowercase__ ) -> Any:
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class A_ ( _a ):
'''simple docstring'''
a__ = "longest"
a__ = "max_length"
a__ = "do_not_pad"
class A_ ( _a ):
'''simple docstring'''
a__ = "pt"
a__ = "tf"
a__ = "np"
a__ = "jax"
class A_ :
'''simple docstring'''
def __init__(self , lowercase__ ) -> Tuple:
__UpperCAmelCase = context_managers
__UpperCAmelCase = ExitStack()
def __enter__(self ) -> Optional[int]:
for context_manager in self.context_managers:
self.stack.enter_context(lowercase__ )
def __exit__(self , *lowercase__ , **lowercase__ ) -> Optional[int]:
self.stack.__exit__(*lowercase__ , **lowercase__ )
def __a ( SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__UpperCAmelCase = infer_framework(SCREAMING_SNAKE_CASE )
if framework == "tf":
__UpperCAmelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__UpperCAmelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
__UpperCAmelCase = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def __a ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = model_class.__name__
__UpperCAmelCase = infer_framework(SCREAMING_SNAKE_CASE )
if framework == "tf":
__UpperCAmelCase = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__UpperCAmelCase = inspect.signature(model_class.forward ) # PyTorch models
else:
__UpperCAmelCase = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = "" , SCREAMING_SNAKE_CASE = "." ) -> List[str]:
'''simple docstring'''
def _flatten_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="" , SCREAMING_SNAKE_CASE="." ):
for k, v in d.items():
__UpperCAmelCase = str(SCREAMING_SNAKE_CASE ) + delimiter + str(SCREAMING_SNAKE_CASE ) if parent_key else k
if v and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
yield from flatten_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , delimiter=SCREAMING_SNAKE_CASE ).items()
else:
yield key, v
return dict(_flatten_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
@contextmanager
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> Tuple:
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Optional[Any]:
'''simple docstring'''
if is_numpy_array(SCREAMING_SNAKE_CASE ):
return np.transpose(SCREAMING_SNAKE_CASE , axes=SCREAMING_SNAKE_CASE )
elif is_torch_tensor(SCREAMING_SNAKE_CASE ):
return array.T if axes is None else array.permute(*SCREAMING_SNAKE_CASE )
elif is_tf_tensor(SCREAMING_SNAKE_CASE ):
import tensorflow as tf
return tf.transpose(SCREAMING_SNAKE_CASE , perm=SCREAMING_SNAKE_CASE )
elif is_jax_tensor(SCREAMING_SNAKE_CASE ):
return jnp.transpose(SCREAMING_SNAKE_CASE , axes=SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''Type not supported for transpose: {type(SCREAMING_SNAKE_CASE )}.''' )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if is_numpy_array(SCREAMING_SNAKE_CASE ):
return np.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif is_torch_tensor(SCREAMING_SNAKE_CASE ):
return array.reshape(*SCREAMING_SNAKE_CASE )
elif is_tf_tensor(SCREAMING_SNAKE_CASE ):
import tensorflow as tf
return tf.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif is_jax_tensor(SCREAMING_SNAKE_CASE ):
return jnp.reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''Type not supported for reshape: {type(SCREAMING_SNAKE_CASE )}.''' )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Optional[int]:
'''simple docstring'''
if is_numpy_array(SCREAMING_SNAKE_CASE ):
return np.squeeze(SCREAMING_SNAKE_CASE , axis=SCREAMING_SNAKE_CASE )
elif is_torch_tensor(SCREAMING_SNAKE_CASE ):
return array.squeeze() if axis is None else array.squeeze(dim=SCREAMING_SNAKE_CASE )
elif is_tf_tensor(SCREAMING_SNAKE_CASE ):
import tensorflow as tf
return tf.squeeze(SCREAMING_SNAKE_CASE , axis=SCREAMING_SNAKE_CASE )
elif is_jax_tensor(SCREAMING_SNAKE_CASE ):
return jnp.squeeze(SCREAMING_SNAKE_CASE , axis=SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''Type not supported for squeeze: {type(SCREAMING_SNAKE_CASE )}.''' )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
if is_numpy_array(SCREAMING_SNAKE_CASE ):
return np.expand_dims(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif is_torch_tensor(SCREAMING_SNAKE_CASE ):
return array.unsqueeze(dim=SCREAMING_SNAKE_CASE )
elif is_tf_tensor(SCREAMING_SNAKE_CASE ):
import tensorflow as tf
return tf.expand_dims(SCREAMING_SNAKE_CASE , axis=SCREAMING_SNAKE_CASE )
elif is_jax_tensor(SCREAMING_SNAKE_CASE ):
return jnp.expand_dims(SCREAMING_SNAKE_CASE , axis=SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(SCREAMING_SNAKE_CASE )}.''' )
def __a ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
if is_numpy_array(SCREAMING_SNAKE_CASE ):
return np.size(SCREAMING_SNAKE_CASE )
elif is_torch_tensor(SCREAMING_SNAKE_CASE ):
return array.numel()
elif is_tf_tensor(SCREAMING_SNAKE_CASE ):
import tensorflow as tf
return tf.size(SCREAMING_SNAKE_CASE )
elif is_jax_tensor(SCREAMING_SNAKE_CASE ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(SCREAMING_SNAKE_CASE )}.''' )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(SCREAMING_SNAKE_CASE , (tuple, list) ):
__UpperCAmelCase = [f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
__UpperCAmelCase = f'''{repo_id}--{value}'''
return auto_map
def __a ( SCREAMING_SNAKE_CASE ) -> Tuple:
'''simple docstring'''
for base_class in inspect.getmro(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = base_class.__module__
__UpperCAmelCase = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
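# Usage sketch (hedged, comments only; function names per the upstream
# transformers utils — here they were obfuscated to `__a`): the tensor helpers
# above dispatch on the input's framework, so one call works for NumPy,
# PyTorch, TensorFlow and JAX alike.
#
#   import numpy as np
#   x = np.zeros((2, 3, 1))
#   transpose(x).shape         # (1, 3, 2)
#   reshape(x, (3, 2)).shape   # (3, 2)
#   squeeze(x, axis=-1).shape  # (2, 3)
#   expand_dims(x, 0).shape    # (1, 2, 3, 1)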
| 333 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def __a ( SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> str:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE )
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default='toto', metadata={'help': 'help message'})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = 'titi'
    toto = 'toto'


class MixedTypeEnum(Enum):
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = 'toto'

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedEnumExample:
    foo: MixedTypeEnum = 'toto'

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={'help': 'help message'})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class A_ :
'''simple docstring'''
a__ = list_field(default=[] )
a__ = list_field(default=[1, 2, 3] )
a__ = list_field(default=["Hallo", "Bonjour", "Hello"] )
a__ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A_ :
'''simple docstring'''
a__ = field()
a__ = field()
a__ = field()
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = BasicEnum(self.required_enum )
@dataclass
class A_ :
'''simple docstring'''
a__ = 42
a__ = field()
a__ = None
a__ = field(default="toto" , metadata={"help": "help message"} )
a__ = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class A_ :
'''simple docstring'''
a__ = False
a__ = True
a__ = None
@dataclass
class A_ :
'''simple docstring'''
a__ = None
a__ = field(default=_a , metadata={"help": "help message"} )
a__ = None
a__ = list_field(default=[] )
a__ = list_field(default=[] )
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self , lowercase__ , lowercase__ ) -> Optional[int]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x).items() if k != 'container'}
            yy = {k: v for k, v in vars(y).items() if k != 'container'}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('choices', None) and yy.get('choices', None):
                for expected_choice in yy['choices'] + xx['choices']:
                    self.assertEqual(xx['type'](expected_choice), yy['type'](expected_choice))
                del xx['type'], yy['type']
            self.assertEqual(xx, yy)
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--bar''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--baz''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--flag''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
self.argparsersEqual(lowercase__ , lowercase__ )
        args = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
expected.add_argument('''--baz''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowercase__ , dest='''baz''' )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
__UpperCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
__UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCAmelCase_ (self ) -> str:
@dataclass
class A_ :
'''simple docstring'''
a__ = "toto"
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowercase__ )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(
lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
__UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--bar''' , default=lowercase__ , type=lowercase__ , help='''help message''' )
expected.add_argument('''--baz''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowercase__ )
__UpperCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
__UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--required_str''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
__UpperCAmelCase = parser.parse_dict(lowercase__ )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_json''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_yaml''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.assertIsNotNone(lowercase__ )
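# Usage sketch (hedged, comments only; the tests above already cover the
# behaviour): typical application-side use with the BasicExample dataclass.
#
#   parser = HfArgumentParser(BasicExample)
#   (example,) = parser.parse_args_into_dataclasses(
#       ['--foo', '1', '--bar', '0.5', '--baz', 'quux', '--flag', 'True']
#   )
#   assert example.foo == 1 and example.flag is True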
| 333 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
A_ : int = 3
def primitive_root(p: int) -> int:
    '''Find a primitive root modulo p by random search.'''
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p)
        if pow(g, 2, p) == 1:
            continue
        if pow(g, p, p) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    '''Generate an ElGamal public/private key pair of the given bit size.'''
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    '''Write the key pair to <name>_pubkey.txt and <name>_privkey.txt.'''
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.'
        )
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', 'w') as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', 'w') as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')


def main() -> None:
    print('Making key files...')
    make_key_files('elgamal', 2048)
    print('Key files generation successful')
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
| 333 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    '''Compute the circular convolution of two discrete signals using a
    circulant-matrix formulation. Class, method and variable names are
    reconstructed from the obfuscated original.'''

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
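    # Worked example (hedged addition): with the default signals [2, 1, 2, -1]
    # and [1, 2, 3, 4] the circular convolution evaluates to [10.0, 10.0, 6.0, 14.0].
    print(CircularConvolution().circular_convolution())  # -> [10.0, 10.0, 6.0, 14.0]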
| 333 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
A_ : List[str] = logging.get_logger(__name__)
A_ : Union[str, Any] = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class ImageGPTConfig(PretrainedConfig):
'''simple docstring'''
a__ = "imagegpt"
a__ = ["past_key_values"]
a__ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , lowercase__=512 + 1 , lowercase__=32 * 32 , lowercase__=512 , lowercase__=24 , lowercase__=8 , lowercase__=None , lowercase__="quick_gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=1E-5 , lowercase__=0.02 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , **lowercase__ , ) -> Union[str, Any]:
__UpperCAmelCase = vocab_size
__UpperCAmelCase = n_positions
__UpperCAmelCase = n_embd
__UpperCAmelCase = n_layer
__UpperCAmelCase = n_head
__UpperCAmelCase = n_inner
__UpperCAmelCase = activation_function
__UpperCAmelCase = resid_pdrop
__UpperCAmelCase = embd_pdrop
__UpperCAmelCase = attn_pdrop
__UpperCAmelCase = layer_norm_epsilon
__UpperCAmelCase = initializer_range
__UpperCAmelCase = scale_attn_weights
__UpperCAmelCase = use_cache
__UpperCAmelCase = scale_attn_by_inverse_layer_idx
__UpperCAmelCase = reorder_and_upcast_attn
__UpperCAmelCase = tie_word_embeddings
super().__init__(tie_word_embeddings=lowercase__ , **lowercase__ )
class ImageGPTOnnxConfig(OnnxConfig):
'''simple docstring'''
@property
def lowerCAmelCase_ (self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = 1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 32 , lowercase__ = 32 , ) -> Mapping[str, Any]:
__UpperCAmelCase = self._generate_dummy_images(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__UpperCAmelCase = dict(preprocessor(images=lowercase__ , return_tensors=lowercase__ ) )
return inputs
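# Usage sketch (hedged, comments only): the defaults mirror the small ImageGPT
# variant; vocab_size is 512 colour clusters plus one start-of-image token and
# n_positions is 32 * 32 pixels.
#
#   config = ImageGPTConfig()
#   assert config.vocab_size == 513 and config.n_positions == 1024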
| 333 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[Any] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
'''simple docstring'''
a__ = "pegasus"
a__ = ["past_key_values"]
a__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(self , lowercase__=50_265 , lowercase__=1_024 , lowercase__=12 , lowercase__=4_096 , lowercase__=16 , lowercase__=12 , lowercase__=4_096 , lowercase__=16 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=True , lowercase__=True , lowercase__="gelu" , lowercase__=1_024 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.02 , lowercase__=0 , lowercase__=False , lowercase__=0 , lowercase__=1 , lowercase__=1 , **lowercase__ , ) -> str:
__UpperCAmelCase = vocab_size
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = d_model
__UpperCAmelCase = encoder_ffn_dim
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = encoder_attention_heads
__UpperCAmelCase = decoder_ffn_dim
__UpperCAmelCase = decoder_layers
__UpperCAmelCase = decoder_attention_heads
__UpperCAmelCase = dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = activation_dropout
__UpperCAmelCase = activation_function
__UpperCAmelCase = init_std
__UpperCAmelCase = encoder_layerdrop
__UpperCAmelCase = decoder_layerdrop
__UpperCAmelCase = use_cache
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , **lowercase__ , )
@property
def lowerCAmelCase_ (self ) -> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase_ (self ) -> int:
return self.d_model
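# Usage sketch (hedged, comments only): the attribute_map above aliases the
# generic config names onto the Pegasus-specific ones.
#
#   config = PegasusConfig()
#   assert config.hidden_size == config.d_model == 1024
#   assert config.num_attention_heads == config.encoder_attention_heads == 16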
| 333 | 1 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    '''BigBird QA module with an extra 5-way head predicting the Natural
    Questions answer category (class names reconstructed from the
    `module_class` reference below).'''

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self) -> None:
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
def cross_entropy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ):
__UpperCAmelCase = logits.shape[-1]
__UpperCAmelCase = (labels[..., None] == jnp.arange(SCREAMING_SNAKE_CASE )[None]).astype('''f4''' )
__UpperCAmelCase = jax.nn.log_softmax(SCREAMING_SNAKE_CASE , axis=-1 )
__UpperCAmelCase = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
__UpperCAmelCase = reduction(SCREAMING_SNAKE_CASE )
return loss
__UpperCAmelCase = partial(SCREAMING_SNAKE_CASE , reduction=jnp.mean )
__UpperCAmelCase = cross_entropy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = cross_entropy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = cross_entropy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return (start_loss + end_loss + pooled_loss) / 3
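# Sketch (hedged, comments only): the loss above averages three one-hot
# cross-entropy terms (start position, end position, answer category). The
# same computation in plain NumPy, for intuition:
#
#   import numpy as np
#   logits = np.array([[2.0, 0.5, 0.1]])
#   labels = np.array([0])
#   one_hot = (labels[..., None] == np.arange(logits.shape[-1])[None]).astype('f4')
#   log_probs = logits - np.log(np.exp(logits).sum(-1, keepdims=True))
#   loss = -(one_hot * log_probs).sum(-1).mean()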
@dataclass
class A_ :
'''simple docstring'''
a__ = "google/bigbird-roberta-base"
a__ = 30_00
a__ = 1_05_00
a__ = 1_28
a__ = 3
a__ = 1
a__ = 5
# tx_args
a__ = 3e-5
a__ = 0.0
a__ = 2_00_00
a__ = 0.00_95
a__ = "bigbird-roberta-natural-questions"
a__ = "training-expt"
a__ = "data/nq-training.jsonl"
a__ = "data/nq-validation.jsonl"
def lowerCAmelCase_ (self ) -> Optional[int]:
os.makedirs(self.base_dir , exist_ok=lowercase__ )
__UpperCAmelCase = os.path.join(self.base_dir , self.save_dir )
__UpperCAmelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class A_ :
'''simple docstring'''
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch) -> dict:
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features) -> dict:
        input_ids, attention_mask = self.fetch_inputs(features['input_ids'])
        batch = {
            'input_ids': jnp.array(input_ids, dtype=jnp.int32),
            'attention_mask': jnp.array(attention_mask, dtype=jnp.int32),
            'start_labels': jnp.array(features['start_token'], dtype=jnp.int32),
            'end_labels': jnp.array(features['end_token'], dtype=jnp.int32),
            'pooled_labels': jnp.array(features['category'], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Any:
'''simple docstring'''
if seed is not None:
__UpperCAmelCase = dataset.shuffle(seed=SCREAMING_SNAKE_CASE )
for i in range(len(SCREAMING_SNAKE_CASE ) // batch_size ):
__UpperCAmelCase = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(SCREAMING_SNAKE_CASE )
@partial(jax.pmap , axis_name='''batch''' )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
def loss_fn(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = model_inputs.pop('''start_labels''' )
__UpperCAmelCase = model_inputs.pop('''end_labels''' )
__UpperCAmelCase = model_inputs.pop('''pooled_labels''' )
__UpperCAmelCase = state.apply_fn(**SCREAMING_SNAKE_CASE , params=SCREAMING_SNAKE_CASE , dropout_rng=SCREAMING_SNAKE_CASE , train=SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = outputs
return state.loss_fn(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
__UpperCAmelCase , __UpperCAmelCase = jax.random.split(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = jax.value_and_grad(SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase = grad_fn(state.params )
__UpperCAmelCase = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
__UpperCAmelCase = jax.lax.pmean(SCREAMING_SNAKE_CASE , '''batch''' )
__UpperCAmelCase = state.apply_gradients(grads=SCREAMING_SNAKE_CASE )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def __a ( SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = model_inputs.pop('''start_labels''' )
__UpperCAmelCase = model_inputs.pop('''end_labels''' )
__UpperCAmelCase = model_inputs.pop('''pooled_labels''' )
__UpperCAmelCase = state.apply_fn(**SCREAMING_SNAKE_CASE , params=state.params , train=SCREAMING_SNAKE_CASE )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = outputs
__UpperCAmelCase = state.loss_fn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class A_ ( train_state.TrainState ):
'''simple docstring'''
a__ = struct.field(pytree_node=_a )
@dataclass
class A_ :
'''simple docstring'''
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = None
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ) -> int:
__UpperCAmelCase = model.params
__UpperCAmelCase = TrainState.create(
apply_fn=model.__call__ , params=lowercase__ , tx=lowercase__ , loss_fn=lowercase__ , )
if ckpt_dir is not None:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = restore_checkpoint(lowercase__ , lowercase__ )
__UpperCAmelCase = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
__UpperCAmelCase , __UpperCAmelCase = build_tx(**lowercase__ )
__UpperCAmelCase = train_state.TrainState(
step=lowercase__ , apply_fn=model.__call__ , params=lowercase__ , tx=lowercase__ , opt_state=lowercase__ , )
__UpperCAmelCase = args
__UpperCAmelCase = data_collator
__UpperCAmelCase = lr
__UpperCAmelCase = params
__UpperCAmelCase = jax_utils.replicate(lowercase__ )
return state
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> Optional[int]:
__UpperCAmelCase = self.args
__UpperCAmelCase = len(lowercase__ ) // args.batch_size
__UpperCAmelCase = jax.random.PRNGKey(0 )
__UpperCAmelCase = jax.random.split(lowercase__ , jax.device_count() )
for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0, dtype=jnp.float32)
__UpperCAmelCase = get_batched_dataset(lowercase__ , args.batch_size , seed=lowercase__ )
__UpperCAmelCase = 0
for batch in tqdm(lowercase__ , total=lowercase__ , desc=F'''Running EPOCH-{epoch}''' ):
__UpperCAmelCase = self.data_collator(lowercase__ )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.train_step_fn(lowercase__ , lowercase__ , **lowercase__ )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
__UpperCAmelCase = jax_utils.unreplicate(state.step )
__UpperCAmelCase = running_loss.item() / i
__UpperCAmelCase = self.scheduler_fn(state_step - 1 )
__UpperCAmelCase = self.evaluate(lowercase__ , lowercase__ )
__UpperCAmelCase = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(lowercase__ ) )
self.logger.log(lowercase__ , commit=lowercase__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=lowercase__ )
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """Linear warmup from `init_lr` to `lr`, then linear decay to ~0 over the remaining steps."""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # apply weight decay to everything except biases and LayerNorm scales
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
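# Illustrative wiring of the helpers above (a sketch, not part of the original
# script; every value and name below is a placeholder assumption):
#
#   tx, lr_schedule = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=500,
#                              num_train_steps=10_000, weight_decay=1e-3)
#   trainer = Trainer(args, data_collator, train_step, val_step,
#                     model_save_fn=model.save_pretrained, logger=wandb,
#                     scheduler_fn=lr_schedule)
#   state = trainer.create_state(model, tx, num_train_steps=10_000)
#   trainer.train(state, tr_dataset, val_dataset)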
| 333 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__UpperCAmelCase = tokenizer.tokenize(lowercase__ ) # , add_prefix_space=True)
self.assertListEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokens + [tokenizer.unk_token]
__UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = '''Encode this sequence.'''
__UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase__ , lowercase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
# Testing spaces after special tokens
__UpperCAmelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ )
__UpperCAmelCase = '''Encode <mask> sequence'''
__UpperCAmelCase = '''Encode <mask>sequence'''
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
pass
def lowerCAmelCase_ (self ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = '''A, <mask> AllenNLP sentence.'''
__UpperCAmelCase = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
__UpperCAmelCase = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCAmelCase_ (self ) -> Optional[int]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCAmelCase = F'''{text_of_1_token} {text_of_1_token}'''
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
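# Behavioural note (illustrative, not part of the test suite): `add_prefix_space`
# controls whether a space is prepended before byte-level BPE tokenization, and
# `trim_offsets` controls whether reported offsets exclude that space, e.g.:
#
#   tok = LongformerTokenizerFast.from_pretrained(
#       "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=False
#   )
#   enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)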
| 333 | 1 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
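# Illustrative use of the public API re-exported above (a sketch; the dataset
# name is a placeholder and the first call downloads data from the Hub):
#
#   from datasets import load_dataset
#   ds = load_dataset("glue", "mrpc", split="train")
#   print(ds[0])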
| 333 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1_000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # a second sweep so the multistep residual history is fully populated
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2_540_529) < 10
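# Minimal sketch of driving IPNDMScheduler outside the test harness
# (illustrative; `model` stands for any epsilon-prediction network and `sample`
# for the current latent):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1_000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample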
| 333 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        """Flag non-binary `open(...)` calls that do not pass an explicit encoding."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
            return match
    def _no_print_statements(self, file_path: str):
        """Flag `print(` statements that are not inside comments, strings, or docstrings."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
            return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")
    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
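# Quick sanity check of the encoding regex above (illustrative, not part of the
# test suite): it flags `open(...)` calls that specify neither a binary mode
# nor an explicit encoding.
#
#   import re
#   pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
#   assert pattern.search(' open("data.txt")')                          # flagged
#   assert not pattern.search(' open("data.txt", encoding="utf-8")')    # accepted
#   assert not pattern.search(' open("data.bin", "rb")')                # accepted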
| 333 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1_000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase__ , layer_scale_init_value=1E-5 , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowerCAmelCase_ (self ) -> List[Any]:
pass
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@slow
def lowerCAmelCase_ (self ) -> Any:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = SwiftFormerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self ) -> Union[str, Any]:
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
__UpperCAmelCase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = 8
self.assertEqual(len(lowercase__ ) , lowercase__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowercase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
def _config_zero_init(lowercase__ ):
__UpperCAmelCase = copy.deepcopy(lowercase__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowercase__ , lowercase__ , 1E-10 )
if isinstance(getattr(lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ):
__UpperCAmelCase = _config_zero_init(getattr(lowercase__ , lowercase__ ) )
setattr(lowercase__ , lowercase__ , lowercase__ )
return configs_no_init
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = _config_zero_init(lowercase__ )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(config=lowercase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase_ (self ) -> Optional[Any]:
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
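# Equivalent one-liner through the pipeline API (illustrative; mirrors the slow
# integration test above):
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="MBZUAI/swiftformer-xs")
#   classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")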
| 333 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
A_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
A_ : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
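# Example invocation (illustrative; all paths are placeholders):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin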
| 333 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
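# Illustrative usage of the auto classes defined above (checkpoint name is a
# placeholder; requires flax/jax to be installed):
#
#   model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")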
| 333 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Cast every tensor in a saved state dict to fp16 and save it back to disk."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
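# Example invocation through the fire CLI above (illustrative; file names are
# placeholders):
#
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin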
| 333 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant multipliers given as "multiplier:boundary" pairs, e.g.
    "1:10,0.1:20,0.01:30,0.005": 1 until step 10, 0.1 until 20, 0.01 until 30,
    then the trailing 0.005 for the rest of training."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay over `num_cycles` half-waves."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine schedule with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the optimizer's initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
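# Quick usage sketch (illustrative, not part of the module; the single dummy
# parameter only exists to satisfy the optimizer):
#
#   import torch
#   opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
#   sched = get_scheduler("cosine", opt, num_warmup_steps=10, num_training_steps=100)
#   for _ in range(100):
#       opt.step()
#       sched.step()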
| 333 | 1 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 333 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate the polynomial through (x_points, y_points) at
    x0 using Neville's iterated interpolation; returns the approximated value
    and the full Neville table."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 1 |
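The routine above is Neville's iterated interpolation: q[j][i] holds the interpolating polynomial through i+1 consecutive sample points, evaluated at the query abscissa, and q[n-1][n-1] is the final value. A standalone sketch with readable names (the sample points are illustrative):

def neville(x_points, y_points, x0):
    # Evaluate the polynomial through (x_points, y_points) at x0.
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = y_points[i]
    for i in range(1, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i])
    return q[n - 1][n - 1]

print(neville([0.0, 1.0, 2.0], [0.0, 1.0, 4.0], 1.5))  # 2.25, exact for f(x) = x**2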
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
A_ : Dict = logging.getLogger(__name__)
@dataclass
class A_ :
'''simple docstring'''
a__ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
a__ = field(
default=_a , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
a__ = field(
default=_a , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
a__ = field(
default=_a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
a__ = field(
default=_a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
a__ = field(
default=_a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
@dataclass
class A_ :
'''simple docstring'''
a__ = field(
default=_a , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
a__ = field(
default=_a , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
a__ = field(
default=_a , metadata={"help": "Train language if it is different from the evaluation language."} )
a__ = field(
default=_a , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a__ = field(
default=_a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a__ = field(
default=_a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
a__ = field(
default=_a , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
a__ = field(
default=_a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
a__ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
a__ = field(
default=_a , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
a__ = field(
default=_a , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def __a ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
__UpperCAmelCase = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
__UpperCAmelCase = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase = train_dataset.features['''label'''].names
if training_args.do_eval:
__UpperCAmelCase = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase = eval_dataset.features['''label'''].names
if training_args.do_predict:
__UpperCAmelCase = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase = predict_dataset.features['''label'''].names
# Labels
__UpperCAmelCase = len(SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , idalabel={str(SCREAMING_SNAKE_CASE ): label for i, label in enumerate(SCREAMING_SNAKE_CASE )} , labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
__UpperCAmelCase = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__UpperCAmelCase = False
def preprocess_function(SCREAMING_SNAKE_CASE ):
# Tokenize the texts
return tokenizer(
examples['''premise'''] , examples['''hypothesis'''] , padding=SCREAMING_SNAKE_CASE , max_length=data_args.max_seq_length , truncation=SCREAMING_SNAKE_CASE , )
if training_args.do_train:
if data_args.max_train_samples is not None:
__UpperCAmelCase = min(len(SCREAMING_SNAKE_CASE ) , data_args.max_train_samples )
__UpperCAmelCase = train_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
__UpperCAmelCase = train_dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE ) ) , 3 ):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__UpperCAmelCase = min(len(SCREAMING_SNAKE_CASE ) , data_args.max_eval_samples )
__UpperCAmelCase = eval_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
__UpperCAmelCase = eval_dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
__UpperCAmelCase = min(len(SCREAMING_SNAKE_CASE ) , data_args.max_predict_samples )
__UpperCAmelCase = predict_dataset.select(range(SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
__UpperCAmelCase = predict_dataset.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
__UpperCAmelCase = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = p.predictions[0] if isinstance(p.predictions , SCREAMING_SNAKE_CASE ) else p.predictions
__UpperCAmelCase = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__UpperCAmelCase = default_data_collator
elif training_args.fpaa:
__UpperCAmelCase = DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 )
else:
__UpperCAmelCase = None
# Initialize our Trainer
__UpperCAmelCase = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
__UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
__UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__UpperCAmelCase = last_checkpoint
__UpperCAmelCase = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = train_result.metrics
__UpperCAmelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE )
)
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('''train''' , SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCAmelCase = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
trainer.log_metrics('''eval''' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('''eval''' , SCREAMING_SNAKE_CASE )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = trainer.predict(SCREAMING_SNAKE_CASE , metric_key_prefix='''predict''' )
__UpperCAmelCase = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(SCREAMING_SNAKE_CASE )
)
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
trainer.log_metrics('''predict''' , SCREAMING_SNAKE_CASE )
trainer.save_metrics('''predict''' , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
__UpperCAmelCase = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = label_list[item]
writer.write(f'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
| 333 |
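A typical invocation of the training script above, assuming it is saved as run_xnli.py; the checkpoint and hyperparameters are illustrative, and each flag maps onto one of the dataclass fields parsed by HfArgumentParser:

python run_xnli.py \
  --model_name_or_path bert-base-multilingual-cased \
  --language de \
  --train_language en \
  --do_train \
  --do_eval \
  --per_device_train_batch_size 32 \
  --learning_rate 5e-5 \
  --num_train_epochs 2.0 \
  --max_seq_length 128 \
  --output_dir /tmp/debug_xnli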
def __a ( SCREAMING_SNAKE_CASE ) -> set:
'''simple docstring'''
__UpperCAmelCase = set()
# edges = list of graph's edges
__UpperCAmelCase = get_edges(SCREAMING_SNAKE_CASE )
    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices,
    # and then remove every edge adjacent to from_node or to_node
while edges:
__UpperCAmelCase , __UpperCAmelCase = edges.pop()
chosen_vertices.add(SCREAMING_SNAKE_CASE )
chosen_vertices.add(SCREAMING_SNAKE_CASE )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(SCREAMING_SNAKE_CASE )
return chosen_vertices
def __a ( SCREAMING_SNAKE_CASE ) -> set:
'''simple docstring'''
__UpperCAmelCase = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 333 | 1 |
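Together the two helpers implement the classic maximal-matching 2-approximation for vertex cover: each popped edge forces both endpoints into the cover, and every edge touching either endpoint is discarded. A compact standalone sketch using the commented example graph:

def min_vertex_cover(graph):
    # 2-approximation: take both endpoints of a maximal matching.
    edges = {(u, v) for u, nbrs in graph.items() for v in nbrs}
    cover = set()
    while edges:
        u, v = edges.pop()
        cover.update((u, v))
        edges = {e for e in edges if u not in e and v not in e}
    return cover

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(min_vertex_cover(graph))  # a cover such as {0, 1, 2, 3}; exact set depends on pop order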
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
A_ : Optional[int] = None
A_ : Dict = logging.get_logger(__name__)
A_ : int = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
A_ : List[str] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
A_ : Union[str, Any] = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
A_ : str = '▁'
class A_ ( _a ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = BigBirdTokenizer
a__ = ["input_ids", "attention_mask"]
a__ = []
def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<unk>" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<pad>" , lowercase__="[SEP]" , lowercase__="[MASK]" , lowercase__="[CLS]" , **lowercase__ , ) -> str:
__UpperCAmelCase = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else bos_token
__UpperCAmelCase = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else eos_token
__UpperCAmelCase = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else unk_token
__UpperCAmelCase = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else pad_token
__UpperCAmelCase = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else cls_token
__UpperCAmelCase = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
super().__init__(
lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , pad_token=lowercase__ , cls_token=lowercase__ , mask_token=lowercase__ , **lowercase__ , )
__UpperCAmelCase = vocab_file
__UpperCAmelCase = False if not self.vocab_file else True
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> List[int]:
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , lowercase__ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(lowercase__ )) + [1]
return [1] + ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> List[int]:
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowercase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase = os.path.join(
lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ):
copyfile(self.vocab_file , lowercase__ )
return (out_vocab_file,)
| 333 |
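Typical usage of the fast tokenizer defined above; pulling the checkpoint needs network access and the tokenizers backend, so the decoded output is indicative rather than reproduced:

from transformers import BigBirdTokenizerFast

tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
enc = tok("first segment", "second segment")
print(tok.decode(enc["input_ids"]))  # [CLS] first segment [SEP] second segment [SEP]
# Token-type ids follow the pair layout built above: len(cls+a+sep) zeros, len(b+sep) ones.
print(tok.create_token_type_ids_from_sequences([1, 2], [3, 4]))  # [0, 0, 0, 0, 1, 1, 1]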
A_ : List[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
A_ : int = ['a', 'b', 'c', 'd', 'e']
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = start
# add current to visited
visited.append(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# if all neighbors visited add current to sort
sort.append(SCREAMING_SNAKE_CASE )
    # if not every vertex has been visited, restart the DFS from an unvisited one
if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
for vertice in vertices:
if vertice not in visited:
__UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# return sort
return sort
if __name__ == "__main__":
A_ : Tuple = topological_sort('a', [], [])
print(sort)
| 333 | 1 |
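The recursion above is a depth-first post-order traversal: a vertex is appended only after all of its neighbors, so children precede parents in the output. An equivalent standalone sketch:

def topo_sort(start, edges):
    visited, order = [], []

    def dfs(node):
        visited.append(node)
        for neighbor in edges[node]:
            if neighbor not in visited:
                dfs(neighbor)
        order.append(node)  # post-order: emitted after all descendants

    dfs(start)
    for vertex in edges:    # cover vertices unreachable from start
        if vertex not in visited:
            dfs(vertex)
    return order

edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
print(topo_sort('a', edges))  # ['c', 'd', 'e', 'b', 'a']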
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A_ ( _a ):
'''simple docstring'''
a__ = 42
class A_ ( nn.Module ):
'''simple docstring'''
def __init__(self , lowercase__=3 , lowercase__=3 , lowercase__=("DownEncoderBlock2D",) , lowercase__=(64,) , lowercase__=2 , lowercase__=32 , lowercase__="silu" , lowercase__=True , ) -> Optional[Any]:
super().__init__()
__UpperCAmelCase = layers_per_block
__UpperCAmelCase = torch.nn.Convad(
lowercase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__UpperCAmelCase = None
__UpperCAmelCase = nn.ModuleList([] )
# down
__UpperCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(lowercase__ ):
__UpperCAmelCase = output_channel
__UpperCAmelCase = block_out_channels[i]
__UpperCAmelCase = i == len(lowercase__ ) - 1
__UpperCAmelCase = get_down_block(
lowercase__ , num_layers=self.layers_per_block , in_channels=lowercase__ , out_channels=lowercase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowercase__ , resnet_groups=lowercase__ , attention_head_dim=lowercase__ , temb_channels=lowercase__ , )
self.down_blocks.append(lowercase__ )
# mid
__UpperCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowercase__ , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase__ , temb_channels=lowercase__ , )
# out
__UpperCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowercase__ , eps=1E-6 )
__UpperCAmelCase = nn.SiLU()
__UpperCAmelCase = 2 * out_channels if double_z else out_channels
__UpperCAmelCase = nn.Convad(block_out_channels[-1] , lowercase__ , 3 , padding=1 )
__UpperCAmelCase = False
def lowerCAmelCase_ (self , lowercase__ ) -> Any:
__UpperCAmelCase = x
__UpperCAmelCase = self.conv_in(lowercase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase__ ):
def custom_forward(*lowercase__ ):
return module(*lowercase__ )
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0''' ):
for down_block in self.down_blocks:
__UpperCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowercase__ ) , lowercase__ , use_reentrant=lowercase__ )
# middle
__UpperCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowercase__ , use_reentrant=lowercase__ )
else:
for down_block in self.down_blocks:
__UpperCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase__ ) , lowercase__ )
# middle
__UpperCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowercase__ )
else:
# down
for down_block in self.down_blocks:
__UpperCAmelCase = down_block(lowercase__ )
# middle
__UpperCAmelCase = self.mid_block(lowercase__ )
# post-process
__UpperCAmelCase = self.conv_norm_out(lowercase__ )
__UpperCAmelCase = self.conv_act(lowercase__ )
__UpperCAmelCase = self.conv_out(lowercase__ )
return sample
class A_ ( nn.Module ):
'''simple docstring'''
def __init__(self , lowercase__=3 , lowercase__=3 , lowercase__=("UpDecoderBlock2D",) , lowercase__=(64,) , lowercase__=2 , lowercase__=32 , lowercase__="silu" , lowercase__="group" , ) -> Optional[Any]:
super().__init__()
__UpperCAmelCase = layers_per_block
__UpperCAmelCase = nn.Convad(
lowercase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__UpperCAmelCase = None
__UpperCAmelCase = nn.ModuleList([] )
__UpperCAmelCase = in_channels if norm_type == '''spatial''' else None
# mid
__UpperCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowercase__ , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase__ , temb_channels=lowercase__ , )
# up
__UpperCAmelCase = list(reversed(lowercase__ ) )
__UpperCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowercase__ ):
__UpperCAmelCase = output_channel
__UpperCAmelCase = reversed_block_out_channels[i]
__UpperCAmelCase = i == len(lowercase__ ) - 1
__UpperCAmelCase = get_up_block(
lowercase__ , num_layers=self.layers_per_block + 1 , in_channels=lowercase__ , out_channels=lowercase__ , prev_output_channel=lowercase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowercase__ , resnet_groups=lowercase__ , attention_head_dim=lowercase__ , temb_channels=lowercase__ , resnet_time_scale_shift=lowercase__ , )
self.up_blocks.append(lowercase__ )
__UpperCAmelCase = output_channel
# out
if norm_type == "spatial":
__UpperCAmelCase = SpatialNorm(block_out_channels[0] , lowercase__ )
else:
__UpperCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowercase__ , eps=1E-6 )
__UpperCAmelCase = nn.SiLU()
__UpperCAmelCase = nn.Convad(block_out_channels[0] , lowercase__ , 3 , padding=1 )
__UpperCAmelCase = False
def lowerCAmelCase_ (self , lowercase__ , lowercase__=None ) -> int:
__UpperCAmelCase = z
__UpperCAmelCase = self.conv_in(lowercase__ )
__UpperCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase__ ):
def custom_forward(*lowercase__ ):
return module(*lowercase__ )
return custom_forward
if is_torch_version('''>=''' , '''1.11.0''' ):
# middle
__UpperCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowercase__ , lowercase__ , use_reentrant=lowercase__ )
__UpperCAmelCase = sample.to(lowercase__ )
# up
for up_block in self.up_blocks:
__UpperCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowercase__ ) , lowercase__ , lowercase__ , use_reentrant=lowercase__ )
else:
# middle
__UpperCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowercase__ , lowercase__ )
__UpperCAmelCase = sample.to(lowercase__ )
# up
for up_block in self.up_blocks:
__UpperCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase__ ) , lowercase__ , lowercase__ )
else:
# middle
__UpperCAmelCase = self.mid_block(lowercase__ , lowercase__ )
__UpperCAmelCase = sample.to(lowercase__ )
# up
for up_block in self.up_blocks:
__UpperCAmelCase = up_block(lowercase__ , lowercase__ )
# post-process
if latent_embeds is None:
__UpperCAmelCase = self.conv_norm_out(lowercase__ )
else:
__UpperCAmelCase = self.conv_norm_out(lowercase__ , lowercase__ )
__UpperCAmelCase = self.conv_act(lowercase__ )
__UpperCAmelCase = self.conv_out(lowercase__ )
return sample
class A_ ( nn.Module ):
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__="random" , lowercase__=False , lowercase__=True ) -> Tuple:
super().__init__()
__UpperCAmelCase = n_e
__UpperCAmelCase = vq_embed_dim
__UpperCAmelCase = beta
__UpperCAmelCase = legacy
__UpperCAmelCase = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__UpperCAmelCase = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
__UpperCAmelCase = self.used.shape[0]
__UpperCAmelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__UpperCAmelCase = self.re_embed
__UpperCAmelCase = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
__UpperCAmelCase = n_e
__UpperCAmelCase = sane_index_shape
def lowerCAmelCase_ (self , lowercase__ ) -> Dict:
__UpperCAmelCase = inds.shape
assert len(lowercase__ ) > 1
__UpperCAmelCase = inds.reshape(ishape[0] , -1 )
__UpperCAmelCase = self.used.to(lowercase__ )
__UpperCAmelCase = (inds[:, :, None] == used[None, None, ...]).long()
__UpperCAmelCase = match.argmax(-1 )
__UpperCAmelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__UpperCAmelCase = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__UpperCAmelCase = self.unknown_index
return new.reshape(lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> Any:
__UpperCAmelCase = inds.shape
assert len(lowercase__ ) > 1
__UpperCAmelCase = inds.reshape(ishape[0] , -1 )
__UpperCAmelCase = self.used.to(lowercase__ )
if self.re_embed > self.used.shape[0]: # extra token
__UpperCAmelCase = 0 # simply set to zero
__UpperCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowercase__ )
return back.reshape(lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> Any:
# reshape z -> (batch, height, width, channel) and flatten
__UpperCAmelCase = z.permute(0 , 2 , 3 , 1 ).contiguous()
__UpperCAmelCase = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__UpperCAmelCase = torch.argmin(torch.cdist(lowercase__ , self.embedding.weight ) , dim=1 )
__UpperCAmelCase = self.embedding(lowercase__ ).view(z.shape )
__UpperCAmelCase = None
__UpperCAmelCase = None
# compute loss for embedding
if not self.legacy:
__UpperCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__UpperCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__UpperCAmelCase = z + (z_q - z).detach()
# reshape back to match original input shape
__UpperCAmelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__UpperCAmelCase = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__UpperCAmelCase = self.remap_to_used(lowercase__ )
__UpperCAmelCase = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__UpperCAmelCase = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCAmelCase_ (self , lowercase__ , lowercase__ ) -> Dict:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
__UpperCAmelCase = indices.reshape(shape[0] , -1 ) # add batch axis
__UpperCAmelCase = self.unmap_to_all(lowercase__ )
__UpperCAmelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__UpperCAmelCase = self.embedding(lowercase__ )
if shape is not None:
__UpperCAmelCase = z_q.view(lowercase__ )
# reshape back to match original input shape
__UpperCAmelCase = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A_ ( _a ):
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__=False ) -> Tuple:
__UpperCAmelCase = parameters
__UpperCAmelCase , __UpperCAmelCase = torch.chunk(lowercase__ , 2 , dim=1 )
__UpperCAmelCase = torch.clamp(self.logvar , -30.0 , 20.0 )
__UpperCAmelCase = deterministic
__UpperCAmelCase = torch.exp(0.5 * self.logvar )
__UpperCAmelCase = torch.exp(self.logvar )
if self.deterministic:
__UpperCAmelCase = __UpperCAmelCase = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCAmelCase_ (self , lowercase__ = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
__UpperCAmelCase = randn_tensor(
self.mean.shape , generator=lowercase__ , device=self.parameters.device , dtype=self.parameters.dtype )
__UpperCAmelCase = self.mean + self.std * sample
return x
def lowerCAmelCase_ (self , lowercase__=None ) -> Any:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__=[1, 2, 3] ) -> Optional[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
__UpperCAmelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowercase__ )
def lowerCAmelCase_ (self ) -> List[str]:
return self.mean
| 333 |
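A quick smoke test of the Encoder defined above. The import path is an assumption for diffusers releases that still ship this module layout; with a single down block the final block skips downsampling, and double_z doubles the output channels:

import torch
from diffusers.models.vae import Encoder  # assumed location in this diffusers version

enc = Encoder()                   # defaults: one DownEncoderBlock2D, 64 base channels
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    z = enc(x)
print(z.shape)  # torch.Size([1, 6, 32, 32]); 2 * out_channels because double_z=True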
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : int = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
A_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 333 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_a )
class A_ ( _a ):
'''simple docstring'''
a__ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
a__ = Features({"text": Value("string" )} )
a__ = Features({"labels": ClassLabel} )
a__ = "text"
a__ = "labels"
def lowerCAmelCase_ (self , lowercase__ ) -> str:
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase__ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
__UpperCAmelCase = copy.deepcopy(self )
__UpperCAmelCase = self.label_schema.copy()
__UpperCAmelCase = features[self.label_column]
__UpperCAmelCase = label_schema
return task_template
@property
def lowerCAmelCase_ (self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| 333 |
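The template above is consumed through Dataset.prepare_for_task, which invokes align_with_features and casts columns to the canonical text / labels schema. A hedged usage sketch: the dataset name is illustrative, prepare_for_task is deprecated in recent datasets releases, and it only works when the dataset card ships a matching task template:

from datasets import load_dataset

ds = load_dataset("imdb", split="train[:100]")
ds = ds.prepare_for_task("text-classification")
print(ds.features)  # {'text': Value('string'), 'labels': ClassLabel(num_classes=2, ...)}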
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict:
'''simple docstring'''
model.train()
__UpperCAmelCase = model(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = F.mse_loss(SCREAMING_SNAKE_CASE , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[Any]:
'''simple docstring'''
set_seed(4_2 )
__UpperCAmelCase = RegressionModel()
__UpperCAmelCase = deepcopy(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = RegressionDataset(length=8_0 )
__UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 )
model.to(accelerator.device )
if sched:
__UpperCAmelCase = AdamW(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 )
__UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 )
__UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 )
# Make a copy of `model`
if sched:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __a ( SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
# Test when on a single CPU or GPU that the context manager does nothing
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
# Use a single batch
__UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
def __a ( SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
# Test on distributed setup that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
# Use a single batch
__UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = Accelerator(
split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase , __UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = Accelerator(
split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase , __UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
__UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def __a ( ) -> str:
'''simple docstring'''
__UpperCAmelCase = Accelerator()
__UpperCAmelCase = RegressionDataset(length=8_0 )
__UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 )
__UpperCAmelCase = RegressionDataset(length=9_6 )
__UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 )
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE )
if iteration < len(SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE )
if batch_num < len(SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __a ( ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = Accelerator()
__UpperCAmelCase = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 333 | 1 |
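The behavior exercised above centers on accelerator.accumulate, which suppresses gradient synchronization until gradient_accumulation_steps micro-batches have been processed. An idiomatic, self-contained loop (tiny synthetic model and data, purely illustrative):

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
data = DataLoader(TensorDataset(torch.randn(32, 4), torch.randn(32, 1)), batch_size=8)
model, opt, data = accelerator.prepare(model, opt, data)

for x, y in data:
    with accelerator.accumulate(model):  # grads sync only every second step
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        opt.step()
        opt.zero_grad()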
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
A_ : Tuple = logging.get_logger(__name__)
class A_ ( _a ):
'''simple docstring'''
a__ = "linear"
a__ = "cosine"
a__ = "cosine_with_restarts"
a__ = "polynomial"
a__ = "constant"
a__ = "constant_with_warmup"
a__ = "piecewise_constant"
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Tuple:
'''simple docstring'''
return LambdaLR(SCREAMING_SNAKE_CASE , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Union[str, Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1.0 , SCREAMING_SNAKE_CASE ) )
return 1.0
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = {}
__UpperCAmelCase = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
__UpperCAmelCase , __UpperCAmelCase = rule_str.split(''':''' )
__UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = float(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = value
__UpperCAmelCase = float(rule_list[-1] )
def create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def rule_func(SCREAMING_SNAKE_CASE ) -> float:
__UpperCAmelCase = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(SCREAMING_SNAKE_CASE ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__UpperCAmelCase = create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ) -> Optional[Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.5 , SCREAMING_SNAKE_CASE = -1 ) -> int:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(SCREAMING_SNAKE_CASE ) * 2.0 * progress )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1e-7 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=-1 ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
        raise ValueError(f'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__UpperCAmelCase = lr_init - lr_end
__UpperCAmelCase = num_training_steps - num_warmup_steps
__UpperCAmelCase = 1 - (current_step - num_warmup_steps) / decay_steps
__UpperCAmelCase = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name, optimizer, step_rules=None, num_warmup_steps=None, num_training_steps=None, num_cycles=1, power=1.0, last_epoch=-1):
    """Unified API to get any scheduler from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
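# A minimal usage sketch for the scheduler API above (not part of the original
# module). It assumes torch is installed; the one-layer model is a hypothetical
# stand-in for a real network.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    lr_scheduler = get_scheduler(
        "polynomial", optimizer, num_warmup_steps=100, num_training_steps=1_000, power=1.0
    )
    for _ in range(1_000):
        optimizer.step()
        lr_scheduler.step()  # linear warmup for 100 steps, then polynomial decay to lr_end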
| 333 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 333 | 1 |
import os
def solution(filename: str = "input.txt") -> int:
    """Find the minimal left-to-right path sum (moving right, up or down) through the comma-separated matrix in `filename`."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(F"""{solution() = }""")
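    # Worked micro-example (a hypothetical input, not the real puzzle file): for the
    # 2x2 matrix [[1, 9], [3, 1]] the cheapest left-to-right path starts at 3 on the
    # second row and moves right to 1, so solution() on that file would return 4.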
| 333 |
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the character order of `input_string` using a zigzag (rail fence) grid of height `key`."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generate the template zigzag grid, fill it with the ciphertext, then read it back in zigzag order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt the ciphertext with every possible key and return all candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
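    # Round-trip sketch using the functions above: encrypt rails the text across
    # `key` rows of a zigzag and reads them row by row; decrypt inverts that.
    ciphertext = encrypt("WE ARE DISCOVERED FLEE AT ONCE", 3)
    assert decrypt(ciphertext, 3) == "WE ARE DISCOVERED FLEE AT ONCE"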
| 333 | 1 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate the polynomial through (x_points, y_points) at x0 using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
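    # Example: the points below lie on the line y = x + 5, so interpolating at
    # x = 5 must give exactly 10 (the second return value is the Neville table).
    value, _table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    assert value == 10.0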
| 333 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000,
            )
        )
        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )
        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1_000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000,
            )
        )
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 333 | 1 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig):
    """Configuration class to store the configuration of a TAPAS model."""

    model_type = "tapas"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_024, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, positive_label_weight=10.0, num_aggregation_labels=0, aggregation_loss_weight=1.0, use_answer_as_supervision=None, answer_loss_importance=1.0, use_normalized_answer_loss=False, huber_loss_delta=None, temperature=1.0, aggregation_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_aggregation=False, average_approximation_function="ratio", cell_selection_preference=None, answer_loss_cutoff=None, max_num_rows=64, max_num_columns=32, average_logits_per_cell=False, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=False, reset_position_index_per_cell=True, disable_per_token_loss=False, aggregation_labels=None, no_aggregation_label_index=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
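# Instantiation sketch (illustrative only, not part of the library module): a
# WTQ-style fine-tuning setup enables aggregation heads and weak supervision.
#   config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#   config.max_num_rows  # -> 64; BERT-style defaults are kept unless overridden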
| 333 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" Bloom tokenizer (backed by HuggingFace's tokenizers library), based on byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
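# Usage sketch (illustrative; assumes network access to the Hugging Face Hub and
# that any Bloom checkpoint with a tokenizer.json is used):
#   tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   ids = tokenizer("Hello world")["input_ids"]
#   tokenizer.decode(ids)  # round-trips back to the original text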
| 333 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed edge in a graph."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list, restricted to edge weights 0 and 1 for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
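    # Small usage sketch of the class above: 0-weight edges go to the front of
    # the deque and 1-weight edges to the back, giving shortest paths in O(V + E).
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    assert g.get_shortest_path(0, 2) == 1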
| 333 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`, via dynamic programming."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
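    # e.g. 12 = 4 + 4 + 4, so three squares suffice; note the function treats 0
    # itself as one square (0**2), which is why it returns 1 for an input of 0.
    assert minimum_squares_to_represent_a_number(12) == 3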
| 333 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SegFormer model."""

    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 333 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between the rows of `a` and the rows of `b`."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Map each RGB pixel in `x` to the index of its nearest cluster center."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    """Image processor for ImageGPT: optionally resizes, normalizes to [-1, 1] and color-quantizes pixels to cluster ids."""

    model_input_names = ["pixel_values"]

    def __init__(self, clusters=None, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_normalize=True, do_color_quantize=True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image, data_format=None) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_normalize=None, do_color_quantize=None, clusters=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
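# Quick sanity check for the helper functions above (a hypothetical 4-colour
# palette, not the real ImageGPT clusters):
if __name__ == "__main__":
    palette = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=float)
    pixels = np.array([[[10, 5, 0], [250, 10, 10]]], dtype=float)  # a 1x2 RGB image
    print(color_quantize(pixels, palette))  # -> [0 1]: nearest palette index per pixel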
| 333 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]

            return outputs
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif  those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123_123

            for max_length in [3, 5, 1_024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 333 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 333 | 1 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
    """Compute the built-in potential of a pn-junction from the doping and intrinsic carrier concentrations."""
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
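    # For a symmetric junction with N_D = N_A = 1e17 cm^-3 and n_i = 1e10 cm^-3
    # (silicon-like values), the built-in potential at 300 K is
    # kT * ln(1e14) / e ~= 0.83 V:
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10))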
| 333 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power (W) from apparent power (VA) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power (VAR) from apparent power (VA) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
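    # For 100 VA of apparent power at power factor 0.9:
    # real power = 100 * 0.9 = 90 W, reactive power = 100 * sqrt(1 - 0.81) ~= 43.59 VAR.
    print(real_power(100, 0.9), reactive_power(100, 0.9))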
| 333 | 1 |
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost to cover all travel days with 1-day, 7-day and 30-day passes."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
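    # Classic example: travel days [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15]
    # is cheapest at 11 (a 1-day pass, then a 7-day pass, then a final 1-day pass).
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11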
| 333 |
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000x1000 grid sorted in decreasing order along both rows and columns."""
    return [list(range(1_000 - i, -1_000 - i, -1)) for i in range(1_000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasing array via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with binary search, reusing the previous row's bound since columns are sorted too."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every value in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking at the first negative since rows are sorted."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting implementations on the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 333 | 1 |
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort non-negative integers in place, bucketing by one base-10 digit per pass."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
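    # Example: sorting digit by digit, least significant digit first.
    assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]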
| 333 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Check pseudo-equality of two `argparse.ArgumentParser` instances by comparing their actions."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True,
        )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            # JSON is a subset of YAML, so the YAML parser handles the .json file as well
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 333 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Any = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess3.4" in name:
        name = name.replace("pretrained.act_postprocess3.4", "neck.reassemble_stage.layers.2.resize")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
if "pretrained" in name:
__UpperCAmelCase = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
__UpperCAmelCase = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
__UpperCAmelCase = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
__UpperCAmelCase = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
__UpperCAmelCase = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
return name
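# Illustrative trace through the rules above: a timm-style key walks through several
# substitutions in sequence, e.g.
#   rename_key("pretrained.model.blocks.0.attn.proj.weight")
#     -> "dpt.encoder.layer.0.attention.output.dense.weight"
# and "refinenet4" lands on "fusion_stage.layers.0" via the abs(layer_idx - 4) flip.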
def read_in_q_k_v( state_dict , config ) -> None:
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
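# The fused qkv tensor popped above has shape (3 * hidden_size, hidden_size): rows
# [0, hidden_size) are the query projection, [hidden_size, 2 * hidden_size) the key,
# and the last hidden_size rows the value, which is exactly how the slices split it.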
def prepare_img( ) -> Image.Image:
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ) -> None:
    '''simple docstring'''
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 4_8_0 if '''ade''' in checkpoint_url else 3_8_4
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='''pt''' )
    # forward pass
    outputs = model(**encoding ).logits if '''ade''' in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1e-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model to hub...''' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 333 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution :
'''simple docstring'''
def __init__(self ) -> None:
__UpperCAmelCase = [2, 1, 2, -1]
__UpperCAmelCase = [1, 2, 3, 4]
    def circular_convolution(self ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
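# Worked example (using the names reconstructed above): CircularConvolution().circular_convolution()
# circularly convolves [2, 1, 2, -1] with [1, 2, 3, 4] and returns [10.0, 10.0, 6.0, 14.0];
# e.g. y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10.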
if __name__ == "__main__":
doctest.testmod()
| 333 | 1 |
def __a ( SCREAMING_SNAKE_CASE = 1_0 , SCREAMING_SNAKE_CASE = 1_0_0_0 , SCREAMING_SNAKE_CASE = True ) -> int:
'''simple docstring'''
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
return min_val if option else max_val
def get_avg( number_a , number_b ) -> int:
    '''simple docstring'''
    return int((number_a + number_b) / 2 )
def guess_the_number( lower , higher , to_guess ) -> None:
'''simple docstring'''
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('''argument value for lower and higher must be(lower > higher)''' )
if not lower < to_guess < higher:
raise ValueError(
'''guess value must be within the range of lower and higher value''' )
    def answer(number ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''' )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
print(f'''guess the number : {last_numbers[-1]}''' )
print(f'''details : {last_numbers!s}''' )
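# Worked example: guess_the_number(10, 1_000, 17) halves the search window each turn:
# get_avg(10, 1000) = 505 -> "high", then 257, 133, 71, 40, 25 (all "high"), and finally
# get_avg(10, 25) = 17 -> "same", so the loop prints "guess the number : 17".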
def main( ) -> None:
    '''simple docstring'''
    lower = int(input('''Enter lower value : ''' ).strip() )
    higher = int(input('''Enter high value : ''' ).strip() )
    guess = int(input('''Enter value to guess : ''' ).strip() )
    guess_the_number(lower , higher , guess )
if __name__ == "__main__":
main()
| 333 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[Any] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig ( PretrainedConfig ):
'''simple docstring'''
a__ = "pegasus"
a__ = ["past_key_values"]
a__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self , vocab_size=50_265 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
@property
    def num_attention_heads(self ) -> int:
return self.encoder_attention_heads
@property
    def hidden_size(self ) -> int:
return self.d_model
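# Minimal usage note: thanks to the attribute_map above, PegasusConfig().num_attention_heads
# resolves to encoder_attention_heads (16 by default) and PegasusConfig().hidden_size to
# d_model (1024 by default), on top of the explicit properties defined here.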
| 333 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling( data ) -> tuple:
'''simple docstring'''
return (data["data"], data["target"])
def xgboost( features , target , test_features ) -> np.ndarray:
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0 , random_state=4_2 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
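# Shape note: XGBRegressor.predict returns a flat (n_samples,) array; the reshape above
# turns it into an (n_samples, 1) column so each row lines up with one test example.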
def main( ) -> None:
    '''simple docstring'''
    california = fetch_california_housing()
    data , target = data_handling(california )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test , predictions )}''' )
    print(f'''Mean Square Error : {mean_squared_error(y_test , predictions )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 333 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
a__ = LongformerTokenizer
a__ = True
a__ = LongformerTokenizerFast
a__ = True
def lowerCAmelCase_ (self ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
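        # With this toy vocab and merge list, only the "e r" merge applies to "lower newer",
        # so it tokenizes to ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        # ("\u0120" is the GPT-2 style space marker), as the full-tokenizer test below checks.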
    def get_tokenizer(self , **kwargs ) -> int:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer(self , **kwargs ) -> Tuple:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ) -> Dict:
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer(self ) -> Optional[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = '''Encode this sequence.'''
__UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase__ , lowercase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
# Testing spaces after special tokens
__UpperCAmelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ )
__UpperCAmelCase = '''Encode <mask> sequence'''
__UpperCAmelCase = '''Encode <mask>sequence'''
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
pass
def lowerCAmelCase_ (self ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = '''A, <mask> AllenNLP sentence.'''
__UpperCAmelCase = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
__UpperCAmelCase = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCAmelCase_ (self ) -> Optional[int]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCAmelCase = F'''{text_of_1_token} {text_of_1_token}'''
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
| 333 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
import json
import tempfile
from pathlib import Path
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
tokenizer = FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
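# Sizing note: with d_model=4, one encoder and one decoder layer, single-head attention
# and 4-dim FFNs, every weight matrix stays tiny, which is how the saved half-precision
# checkpoint lands in the ~60KB range mentioned at the top of this script.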
| 333 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest ( SchedulerCommonTest ):
'''simple docstring'''
a__ = (IPNDMScheduler,)
a__ = (("num_inference_steps", 50),)
    def get_scheduler_config(self , **kwargs ) -> Tuple:
        config = {'''num_train_timesteps''': 1_000}
        config.update(**kwargs )
        return config
def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Any:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config(**lowercase__ )
__UpperCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__UpperCAmelCase = dummy_past_residuals[:]
if time_step is None:
__UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ )
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Optional[int]:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCAmelCase = dummy_past_residuals[:]
if time_step is None:
__UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residual (must be after setting timesteps)
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self , **kwargs ) -> List[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**kwargs )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase__ , '''set_timesteps''' ):
scheduler.set_timesteps(lowercase__ )
elif num_inference_steps is not None and not hasattr(lowercase__ , '''set_timesteps''' ):
__UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.timesteps[5]
__UpperCAmelCase = scheduler.timesteps[6]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase_ (self ) -> List[Any]:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase__ , time_step=lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase__ , time_step=lowercase__ )
def lowerCAmelCase_ (self ) -> str:
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
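        # Note the loose tolerance: the check only pins the mean absolute value of the
        # final sample to within 10 of ~2.54e6, not exact per-element values.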
| 333 | 1 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
A_ : List[str] = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys( state_dict ) -> dict:
    '''simple docstring'''
    model_state_dict = {}
    state_dict.pop('''pixel_mean''' , None )
    state_dict.pop('''pixel_std''' , None )
    output_hypernetworks_mlps_pattern = r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(output_hypernetworks_mlps_pattern , key ):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern , key ).group(2 ) )
            if layer_nb == 0:
                key = key.replace('''layers.0''' , '''proj_in''' )
            elif layer_nb == 1:
                key = key.replace('''layers.1''' , '''layers.0''' )
            elif layer_nb == 2:
                key = key.replace('''layers.2''' , '''proj_out''' )
        model_state_dict[key] = value
    model_state_dict['''shared_image_embedding.positional_embedding'''] = model_state_dict[
        '''prompt_encoder.shared_embedding.positional_embedding'''
    ]
    return model_state_dict
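# e.g. "mask_decoder.output_hypernetworks_mlps.2.layers.1.weight" matches the pattern with
# group(2) == "1", so "layers.1" becomes "layers.0": the checkpoint's 3-layer MLPs are
# renamed to the proj_in -> layers.0 -> proj_out layout used by the HF implementation.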
def convert_sam_checkpoint( model_name , pytorch_dump_folder , push_to_hub , model_hub_id="ybelkada/segment-anything" ) -> None:
    '''simple docstring'''
    checkpoint_path = hf_hub_download(model_hub_id , f'''checkpoints/{model_name}.pth''' )
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , )
        config = SamConfig(
            vision_config=vision_config , )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , )
        config = SamConfig(
            vision_config=vision_config , )
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
    state_dict = replace_keys(state_dict )
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor )
    hf_model = SamModel(config )
    hf_model.load_state_dict(state_dict )
    hf_model = hf_model.to('''cuda''' )
    img_url = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('''RGB''' )
    input_points = [[[4_0_0, 6_5_0]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image ) , return_tensors='''pt''' ).to('''cuda''' )
    with torch.no_grad():
        output = hf_model(**inputs )
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors='''pt''' ).to('''cuda''' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604
        input_boxes = ((7_5, 2_7_5, 1_7_2_5, 8_5_0),)
        inputs = processor(images=np.array(raw_image ) , input_boxes=input_boxes , return_tensors='''pt''' ).to('''cuda''' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514
        # Test with 2 points and 1 image.
        input_points = [[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image ) , input_points=input_points , input_labels=input_labels , return_tensors='''pt''' ).to('''cuda''' )
        with torch.no_grad():
            output = hf_model(**inputs )
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 333 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester :
'''simple docstring'''
    def __init__(self , parent , batch_size=13 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=224 , num_labels=1_000 , layer_depths=[3, 3, 6, 4] , embed_dims=[48, 56, 112, 220] , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self ) -> str:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ) -> Optional[Any]:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase__ , layer_scale_init_value=1E-5 , )
    def create_and_check_model(self , config , pixel_values , labels ) -> int:
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ) -> List[Any]:
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ) -> Optional[int]:
        config , pixel_values , labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
a__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
a__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def lowerCAmelCase_ (self ) -> List[str]:
        self.model_tester = SwiftFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCAmelCase_ (self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowerCAmelCase_ (self ) -> List[Any]:
pass
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@slow
def lowerCAmelCase_ (self ) -> Any:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = SwiftFormerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self ) -> Union[str, Any]:
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
__UpperCAmelCase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = 8
self.assertEqual(len(lowercase__ ) , lowercase__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowercase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
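            # With the defaults above (image_size=224), the spatial side of the checked
            # feature maps goes 56, 56, 28, 28, 14, 14, 7, 7: (224 // 4) halved after
            # every second hidden state.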
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
def _config_zero_init(lowercase__ ):
__UpperCAmelCase = copy.deepcopy(lowercase__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowercase__ , lowercase__ , 1E-10 )
if isinstance(getattr(lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ):
__UpperCAmelCase = _config_zero_init(getattr(lowercase__ , lowercase__ ) )
setattr(lowercase__ , lowercase__ , lowercase__ )
return configs_no_init
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = _config_zero_init(lowercase__ )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(config=lowercase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase_ (self ) -> Optional[Any]:
pass
def prepare_img( ) -> Any:
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ (self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(lowercase__ )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
__UpperCAmelCase = model(**lowercase__ )
# verify the logits
__UpperCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase__ )
__UpperCAmelCase = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
| 333 | 1 |
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ) -> bool:
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    a.name = ''''''
    b.name = ''''''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
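# The names are blanked before the comparison because two initializers holding identical
# tensors still differ in their `name` field; masking it lets plain protobuf equality
# flag them as true duplicates, after which the original names are restored.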
def _node_replace_input_with( node_proto , name , new_name ) -> None:
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ) -> None:
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ) -> None:
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ) -> str:
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:  # FLOAT (4 bytes per element)
                    mem_size *= 4
                elif dtype == 6:  # INT32 (4 bytes per element)
                    mem_size *= 4
                elif dtype == 7 or dtype == 1_1:  # INT64 / DOUBLE (8 bytes per element)
                    mem_size *= 8
                else:
                    print('''unexpected data type: ''' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('''total reduced size: ''' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , '''GB''' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = '''optimized_''' + model_file_name
    new_model = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model )
    return new_model
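# Usage sketch: remove_dup_initializers("repo/model.onnx") deduplicates identical weight
# tensors, re-points graph inputs at the surviving copy, writes
# "repo/optimized_model.onnx" next to the input, and returns that new path.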
| 333 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
A_ : str = logging.get_logger(__name__)
A_ : str = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
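

if __name__ == "__main__":
    # Illustrative usage sketch (an addition, not part of the original module);
    # requires flax to be installed and downloads the checkpoint on first use.
    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    print(type(model).__name__)  # expected: FlaxBertModel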
| 333 | 1 |
from __future__ import annotations

import math


class SegmentTree:
    """Max segment tree with lazy propagation for range updates and queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` on the range [a, b] in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the max on the range [a, b] in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 333 |
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging

logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise constant schedule from rules like "1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay down to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Cosine schedule with several hard restarts, after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Polynomial decay from the optimizer's initial lr to `lr_end`, after a linear warmup."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified API to build any of the schedulers above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
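

if __name__ == "__main__":
    # Illustrative usage (an addition, not part of the original module): attach
    # a linear warmup/decay schedule to a toy single-parameter optimizer.
    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]
    optim = torch.optim.AdamW(params, lr=1e-3)
    lr_sched = get_scheduler("linear", optim, num_warmup_steps=10, num_training_steps=100)
    for _ in range(5):
        optim.step()
        lr_sched.step()
    print(lr_sched.get_last_lr())  # lr is still warming up at step 5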
| 333 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """A "fast", tokenizers-backed RoBERTa tokenizer (byte-level BPE)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
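

if __name__ == "__main__":
    # Illustrative usage (an addition, not part of the original module);
    # downloads the `roberta-base` tokenizer files on first use.
    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    encoding = tokenizer("Hello world!")
    print(tokenizer.decode(encoding["input_ids"]))  # <s>Hello world!</s>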
| 333 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate the polynomial through (x_points, y_points) at x0
    using Neville's method. Returns the interpolated value and the full table
    of intermediate computations.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
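    # Illustrative check (an addition, not from the original file): the points
    # below lie on the line y = 2 * x, so interpolating at 2.5 should give 5.0.
    print(neville_interpolate([1, 2, 3], [2, 4, 6], 2.5)[0])  # 5.0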
| 333 | 1 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
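

if __name__ == "__main__":
    # Illustrative usage (an addition, not from the original file): a queue of
    # capacity 3 that we fill partially, peek at, and drain.
    queue = CircularQueue(3)
    queue.enqueue(10).enqueue(20)
    print(queue.first())  # 10
    print(queue.dequeue())  # 10
    print(len(queue))  # 1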
| 333 |
def matching_min_vertex_cover(graph: dict) -> set:
    """
    APX algorithm for min vertex cover based on a maximal matching: repeatedly
    pick an arbitrary edge and add both of its endpoints to the cover.
    """
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node), add its extremities to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 333 | 1 |
def cramers_rule_2x2(equation1: list, equation2: list) -> tuple[float, float]:
    """
    Solve the system a1*x + b1*y = c1, a2*x + b2*y = c2 with Cramer's rule.
    Each equation is given as the coefficient list [a, b, c].
    """
    # Check if the input is valid
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
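

if __name__ == "__main__":
    # Illustrative usage (an addition, not from the original file):
    # x + 2y = 3 and 2x + y = 3 intersect at x = 1, y = 1.
    print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))  # (1.0, 1.0)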
| 333 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list, sort: list) -> list:
    """Perform a topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| 333 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    """Build a per-process tensor so that the gathered result is 1..num_processes**2."""
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
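
# Illustrative launch note (an addition, not part of the original script): the
# tests above only exercise cross-process behaviour when run through the
# accelerate launcher, e.g.
#   accelerate launch --num_processes 2 test_ops.py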
| 333 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 333 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 333 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Return everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 333 | 1 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str):
        """Compute the common substring of the prefix of the node and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list) -> None:
        """Insert many words in the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if the word is on the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Delete a word from the tree; return True if it was deleted."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        """Print the tree, one node per line, indented by depth."""
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 333 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 333 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
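
# A short usage sketch (added; illustrative rather than something to run in
# CI): the first call triggers `setup()` lazily and downloads the
# bart-large-mnli checkpoint.
if __name__ == "__main__":
    classifier = TextClassificationTool()
    print(classifier("This movie was fantastic!", labels=["positive", "negative"]))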
| 333 |
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses decrypt function by guessing every key"""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
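
    # Minimal round-trip sketch (added for illustration; the values are
    # arbitrary): encrypting with 4 rails and decrypting with the same key is
    # lossless, and brute-forcing recovers the plaintext at the right key.
    ciphertext = encrypt("Hello World", 4)
    assert decrypt(ciphertext, 4) == "Hello World"
    assert bruteforce(ciphertext)[4] == "Hello World"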
| 333 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
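
# A brief instantiation sketch (added; values are illustrative, not defaults
# from any released checkpoint): `rope_scaling` must carry exactly the keys
# "type" and "factor", which `_rope_scaling_validation` checks above.
if __name__ == "__main__":
    config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.hidden_size, config.rope_scaling)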
| 333 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=lowercase__ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase__ , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=lowercase__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=lowercase__ )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
        __UpperCAmelCase = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=lowercase__ , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def lowerCAmelCase_ (self , lowercase__ , lowercase__=0 ) -> List[Any]:
if str(lowercase__ ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(lowercase__ )
else:
__UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowercase__ )
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
        __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __UpperCAmelCase = pipe('''anime turtle''' , generator=lowercase__ , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
__UpperCAmelCase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
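
# A short usage sketch (added; it downloads the public CLIP checkpoint, so
# treat it as illustrative). `from_pretrained` wires up the image processor
# and tokenizer declared in `attributes` above.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
    print(batch.keys())  # input_ids, attention_mask, pixel_values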
| 333 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 333 | 1 |
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
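
    # Quick sanity sketch (added for illustration): OR-ing 25 (0b11001) with
    # 32 (0b100000) sets every bit that is set in either operand.
    assert binary_or(25, 32) == "0b111001"
    assert binary_or(25, 32) == bin(25 | 32)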
| 333 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
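
    # Worked sketch (added): 12 = 4 + 4 + 4, so three squares suffice, and
    # the dynamic programming table above finds exactly that.
    assert minimum_squares_to_represent_a_number(12) == 3
    assert minimum_squares_to_represent_a_number(21) == 3  # 21 = 16 + 4 + 1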
| 333 | 1 |
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
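
    # Small usage sketch (added): "applepenapple" splits into dictionary
    # words, while "catsandog" cannot be fully segmented.
    assert word_break("applepenapple", ["apple", "pen"])
    assert not word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])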
| 333 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_normalize: Optional[bool] = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
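
# A hedged usage sketch (added): the color palette ("clusters") below is an
# invented 8-color palette rather than the real 512-entry ImageGPT codebook,
# so the token ids are only illustrative.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    palette = rng.integers(-1, 2, size=(8, 3)).astype(float)  # values in [-1, 1]
    image = rng.integers(0, 256, size=(32, 32, 3)).astype(np.uint8)
    processor = ImageGPTImageProcessor(clusters=palette, size={"height": 4, "width": 4})
    encoded = processor(images=image, return_tensors="np")
    print(encoded["input_ids"].shape)  # (1, 16): one token per pixel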
| 333 | 1 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
'''simple docstring'''
# Test when on a single CPU or GPU that the context manager does nothing
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
# Use a single batch
__UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
def test_distributed_sync(accelerator):
'''simple docstring'''
# Test on distributed setup that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
# Use a single batch
__UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
'''simple docstring'''
__UpperCAmelCase = Accelerator(
split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase , __UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
'''simple docstring'''
__UpperCAmelCase = Accelerator(
split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase , __UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
__UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
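
# A condensed sketch of the pattern these tests exercise (added; illustrative,
# single-process): `accelerator.accumulate` only syncs gradients every
# `gradient_accumulation_steps` batches.
#
#   accelerator = Accelerator(gradient_accumulation_steps=2)
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch, target in dataloader:
#       with accelerator.accumulate(model):
#           loss = F.mse_loss(model(batch), target)
#           accelerator.backward(loss)
#           optimizer.step()
#           optimizer.zero_grad()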
| 333 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_poolformer': [
        'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'PoolFormerConfig',
        'PoolFormerOnnxConfig',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
        'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PoolFormerForImageClassification',
        'PoolFormerModel',
        'PoolFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 333 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)"""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string"""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
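
    # Tiny round-trip sketch (added for illustration; the metadata values are
    # invented): keys with dashes in YAML map to underscore fields and back.
    example = DatasetMetadata.from_yaml_string("license: mit\ntrain-eval-index: []\n")
    assert "train_eval_index" in example
    print(example.to_yaml_string())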
| 333 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
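
    # Worked numbers (added): for S = 100 VA at pf = 0.8,
    # P = S * pf = 80 W and Q = S * sqrt(1 - pf**2) = 60 var.
    assert real_power(100, 0.8) == 80.0
    assert round(reactive_power(100, 0.8), 10) == 60.0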
| 333 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
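
# A short usage sketch (added): `attribute_map` lets the generic config names
# resolve to DistilBERT's own field names.
if __name__ == "__main__":
    config = DistilBertConfig(n_layers=3)
    assert config.num_hidden_layers == config.n_layers == 3
    print(config.hidden_size)  # aliases `dim`, i.e. 768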
| 333 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1_000 - i, -1_000 - i, -1)) for i in range(1_000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number with binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """An O(m log n) solution that uses binary search to find the sign boundary."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """This solution is O(n^2) because it iterates through every column and row."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Similar to the brute force solution above but uses break to cut iterations short."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark our functions next to each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
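
    # Small worked example (added): in this 2x2 grid sorted in decreasing
    # order, three entries are negative, and all three counters agree.
    sample = [[3, -1], [-1, -2]]
    assert (
        count_negatives_binary_search(sample)
        == count_negatives_brute_force(sample)
        == count_negatives_brute_force_with_break(sample)
        == 3
    )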
| 333 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 333 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: "int"
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
@dataclass
class A_ :
'''simple docstring'''
a__ = False
a__ = True
a__ = None
@dataclass
class A_ :
'''simple docstring'''
a__ = None
a__ = field(default=_a , metadata={"help": "help message"} )
a__ = None
a__ = list_field(default=[] )
a__ = list_field(default=[] )
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def argparsersEqual(self , a , b ) -> Optional[int]:
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != '''container'''}
            yy = {k: v for k, v in vars(y ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , None ) and yy.get('''choices''' , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](expected_choice ) , yy['''type'''](expected_choice ) )
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--bar''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--baz''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--flag''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((__UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ )
self.assertFalse(example.flag )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
expected.add_argument('''--baz''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowercase__ , dest='''baz''' )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
__UpperCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
__UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCAmelCase_ (self ) -> str:
@dataclass
class A_ :
'''simple docstring'''
a__ = "toto"
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowercase__ )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(
lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
__UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--bar''' , default=lowercase__ , type=lowercase__ , help='''help message''' )
expected.add_argument('''--baz''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowercase__ )
__UpperCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
__UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--required_str''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
__UpperCAmelCase = parser.parse_dict(lowercase__ )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_json''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(lowercase__ , lowercase__ )
            __UpperCAmelCase = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_yaml''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.assertIsNotNone(lowercase__ )
| 333 | 1 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences ( sequence ) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree ( sequence , current_subsequence , index ) -> None:
    '''simple docstring'''
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
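# Each element is either excluded (first recursive call) or included
# (append + second call), so the recursion enumerates all 2**n subsequences;
# e.g. [1, 2] prints [], [2], [1], [1, 2] in this traversal order.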
if __name__ == "__main__":
    seq : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 333 |
import doctest
from collections import deque
import numpy as np
class A_ :
'''simple docstring'''
def __init__(self ) -> None:
__UpperCAmelCase = [2, 1, 2, -1]
__UpperCAmelCase = [1, 2, 3, 4]
    def circular_convolution(self ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
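# Note: the same result can be obtained without building the rotation matrix,
# since circular convolution is pointwise multiplication in the frequency
# domain, e.g. np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b))) for two
# equal-length signals.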
if __name__ == "__main__":
doctest.testmod()
| 333 | 1 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
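# (= 17 planes x 65,536 codepoints per plane, i.e. 0x110000)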
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
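# CANINE tokenizes by Unicode codepoint, so a regular character's id is just
# ord(c); e.g. "hi" becomes [0xE000, 104, 105, 0xE001] once the [CLS]/[SEP]
# pseudo-characters are added around it.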
class CanineTokenizer ( PreTrainedTokenizer ):
'''simple docstring'''
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2_048 , **kwargs , ) -> str:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
    def vocab_size(self ) -> int:
        return self._unicode_vocab_size
    def _tokenize(self , text ) -> List[str]:
        return list(text )
    def _convert_token_to_id(self , token ) -> int:
        try:
            return ord(token )
        except TypeError:
            raise ValueError(F'''invalid token: \'{token}\'''' )
    def _convert_id_to_token(self , index ) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(F'''invalid id: {index}''' )
    def convert_tokens_to_string(self , tokens ) -> str:
        return "".join(tokens )
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Optional[int]:
        return ()
| 333 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[Any] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig ( PretrainedConfig ):
'''simple docstring'''
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self , vocab_size=50_265 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ) -> str:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
    @property
    def num_attention_heads(self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self ) -> int:
        return self.d_model
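# Usage sketch (hypothetical values): PegasusConfig(d_model=512, encoder_layers=6)
# then exposes `num_attention_heads` and `hidden_size` as read-only views of the
# encoder settings via the properties above.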
| 333 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Dict = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch ( checkpoint_repo , pytorch_dump_folder_path ) -> Optional[Any]:
    '''simple docstring'''
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=['''RobertaPreLayerNormForMaskedLM'''] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename='''pytorch_model.bin''' ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith('''roberta.''' ):
            tensor_key = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
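# The renaming above maps e.g. "roberta.encoder.layer.0.attention.self.query.weight"
# to "roberta_prelayernorm.encoder.layer.0.attention.self.query.weight", while the
# unused `.self.LayerNorm.*` weights from the original checkpoint are dropped.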
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 333 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
'''simple docstring'''
a__ = LongformerTokenizer
a__ = True
a__ = LongformerTokenizerFast
a__ = True
def lowerCAmelCase_ (self ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
__UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__UpperCAmelCase = {'''unk_token''': '''<unk>'''}
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase__ ) )
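        # The "\u0120" prefix above is the byte-level BPE marker for a leading
        # space (rendered as "Ġ"), which is why " lower" and "lower" map to
        # different vocabulary entries.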
def lowerCAmelCase_ (self , **lowercase__ ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> Dict:
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = '''lower newer'''
return input_text, output_text
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__UpperCAmelCase = tokenizer.tokenize(lowercase__ ) # , add_prefix_space=True)
self.assertListEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokens + [tokenizer.unk_token]
__UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = '''Encode this sequence.'''
__UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase__ , lowercase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
# Testing spaces after special tokens
__UpperCAmelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ )
__UpperCAmelCase = '''Encode <mask> sequence'''
__UpperCAmelCase = '''Encode <mask>sequence'''
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
pass
def lowerCAmelCase_ (self ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = '''A, <mask> AllenNLP sentence.'''
__UpperCAmelCase = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
__UpperCAmelCase = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCAmelCase_ (self ) -> Optional[int]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCAmelCase = F'''{text_of_1_token} {text_of_1_token}'''
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
| 333 | 1 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester :
'''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> Union[str, Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ) -> Optional[int]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ) -> Dict:
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        model = MegatronBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_megatron_bert_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        model = MegatronBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_megatron_bert_for_causal_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        model = MegatronBertForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_megatron_bert_for_next_sequence_prediction(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        model = MegatronBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_megatron_bert_for_pretraining(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        model = MegatronBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_megatron_bert_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        model = MegatronBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_megatron_bert_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_megatron_bert_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_megatron_bert_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[Any]:
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ) -> List[Any]:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp(self ) -> Tuple:
        self.model_tester = MegatronBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37 )
    def test_config(self ) -> Tuple:
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs )
    def test_for_masked_lm(self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction(self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining(self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs )
    def test_for_question_answering(self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs )
def _long_tensor ( tok_lst ) -> Any:
    '''simple docstring'''
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
    @slow
    @unittest.skip('''Model is not available.''' )
    def test_inference_no_head(self ) -> int:
        directory = '''nvidia/megatron-bert-uncased-345m'''
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['''MYDIR'''] , directory )
        model = MegatronBertModel.from_pretrained(directory )
        model.to(torch_device )
        model.half()
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 1_024) )
        self.assertEqual(output.shape , expected_shape )
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = '''ii={} jj={} a={} b={}'''.format(ii , jj , a , b )
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE ) , msg=msg )
| 333 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest ( SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
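    # IPNDM is a multistep scheduler: `step()` consumes a history of past model
    # outputs (`scheduler.ets`), so the tests below seed dummy past residuals
    # before stepping.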
    def get_scheduler_config(self , **kwargs ) -> Tuple:
        config = {'''num_train_timesteps''': 1_000}
        config.update(**kwargs )
        return config
    def check_over_configs(self , time_step=0 , **config ) -> Any:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self ) -> List[str]:
        pass
    def check_over_forward(self , time_step=0 , **forward_kwargs ) -> Optional[int]:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self , **config ) -> List[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape(self ) -> Optional[Any]:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps(self ) -> List[Any]:
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
    def test_inference_steps(self ) -> Union[str, Any]:
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop(self ) -> str:
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2_540_529 ) < 10
| 333 | 1 |
import requests
from bs4 import BeautifulSoup
def stock_price ( symbol = "AAPL" ) -> str:
    '''simple docstring'''
    url = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 333 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester :
'''simple docstring'''
    def __init__(self , parent , batch_size=13 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=224 , num_labels=1_000 , layer_depths=[3, 3, 6, 4] , embed_dims=[48, 56, 112, 220] , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ (self ) -> Optional[Any]:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase__ , layer_scale_init_value=1E-5 , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> int:
__UpperCAmelCase = SwiftFormerModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ (self ) -> Optional[int]:
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = self.prepare_config_and_inputs()
__UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as SwiftFormer
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
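
# A minimal end-to-end inference sketch distilled from the integration test above
# (assumes network access to the "MBZUAI/swiftformer-xs" checkpoint on the Hub):
# from PIL import Image
# from transformers import SwiftFormerForImageClassification, ViTImageProcessor
#
# processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
# model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
# inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
# predicted_class = model(**inputs).logits.argmax(-1).item()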
| 333 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
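
# For instance, following the PATTERNS table and the encoder branch above:
#   rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
#   -> "encoder.layers.0.self_attn.q_proj.weight"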
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the HF Blenderbot structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
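    # Example invocation (the script file name is assumed):
    #   python convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py \
    #       --src_path blenderbot-model.bin --save_dir hf_blenderbot \
    #       --hf_config_json blenderbot-3b-config.json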
| 333 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
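
# Typical usage of these auto classes (a sketch; requires the flax extras installed):
# from transformers import FlaxAutoModel
# model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel via the mappings above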
| 333 | 1 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1

        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
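
# ``batch_step_no_noise`` exists so that several timesteps can be denoised in one
# vectorized call, which is the basis of parallel (ParaDiGMS-style) sampling. A
# rough sketch of the sequential computation it replaces (names assumed):
# for t, s in zip(timesteps_flat, samples_flat):
#     prev = scheduler.step(model(s, t), t, s, eta=0.0).prev_sample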
| 333 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant schedule, e.g. step_rules="1:10,0.1:20,0.01" keeps lr multiple 1
    until step 10, 0.1 until step 20, then 0.01 afterwards."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0 over the remaining steps."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay with several hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified access point that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
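
# Example: a linear warmup-then-decay schedule over 10_000 steps (a sketch; assumes
# an existing ``model`` -- any torch optimizer works):
# import torch
# optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
# lr_scheduler = get_scheduler(
#     "linear", optimizer=optimizer, num_warmup_steps=500, num_training_steps=10_000
# )
# # inside the training loop: optimizer.step(); lr_scheduler.step()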
| 333 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact from a URL.

    The artifact URL can't be used to download directly; we need to follow a
    redirect to get the actual download location first.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test
def reduce_by_model(logs, error_filter=None):
    """Get the error breakdown per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s2)
| 333 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate the polynomial through (x_points, y_points) at x0,
    using Neville's method. Returns the interpolated value together with the full
    table of intermediate computations.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
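    # Example: the points below lie on the line y = x + 5, so evaluating at x0 = 5
    # returns exactly 10.0:
    # value, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    # assert value == 10.0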
| 333 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the HF DeiT structure."""
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
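    # Example invocation (the script file name is assumed):
    #   python convert_deit_timm_to_pytorch.py \
    #       --deit_name vit_deit_base_distilled_patch16_224 \
    #       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224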
| 333 |
def matching_min_vertex_cover(graph: dict) -> set:
    """
    APX algorithm for minimum vertex cover: repeatedly pick an arbitrary edge and
    add both of its endpoints to the cover.
    """
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node), add its extremities to chosen_vertices, and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
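    # Note: this greedy matching-based procedure is the classic 2-approximation for
    # minimum vertex cover -- the returned set is at most twice the optimum size.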
| 333 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
def lowerCAmelCase_ (self , **lowercase__ ) -> Optional[int]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self , **lowercase__ ) -> str:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self , **lowercase__ ) -> str:
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__UpperCAmelCase = [Image.fromarray(np.moveaxis(lowercase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = CLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase__ )
__UpperCAmelCase = CLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase__ )
self.assertIsInstance(processor_fast.tokenizer , lowercase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase__ )
self.assertIsInstance(processor_fast.image_processor , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__UpperCAmelCase = self.get_image_processor(do_normalize=lowercase__ , padding_value=1.0 )
__UpperCAmelCase = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowercase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase__ )
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = CLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__UpperCAmelCase = self.prepare_image_inputs()
__UpperCAmelCase = image_processor(lowercase__ , return_tensors='''np''' )
__UpperCAmelCase = processor(images=lowercase__ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = CLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = processor(text=lowercase__ )
__UpperCAmelCase = tokenizer(lowercase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = CLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = self.prepare_image_inputs()
__UpperCAmelCase = processor(text=lowercase__ , images=lowercase__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowercase__ ):
processor()
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = CLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase = processor.batch_decode(lowercase__ )
__UpperCAmelCase = tokenizer.batch_decode(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = self.get_image_processor()
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = CLIPProcessor(tokenizer=lowercase__ , image_processor=lowercase__ )
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = self.prepare_image_inputs()
__UpperCAmelCase = processor(text=lowercase__ , images=lowercase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
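
# Entry point added for convenience (not in the original tests): lets the suite
# be executed directly, e.g. `python test_processor_clip.py` (filename hypothetical),
# in addition to pytest/unittest discovery.
if __name__ == "__main__":
    unittest.main()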
edges: dict = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list, sort: list) -> list:
    """Perform a depth-first topological sort on the module-level graph."""
    current = start
    # add current vertex to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if the neighbor has not been visited yet, visit it
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # once all neighbors are visited, add the current vertex to the sort
    sort.append(current)
    # if some vertices are still unvisited, select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
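    # A quick verification sketch (an addition, not in the original): vertices
    # are appended only after all of their descendants, so the result comes out
    # leaves-first; reversing it yields a conventional topological order where
    # every edge (u -> v) places u before v.
    order = list(reversed(topological_sort("a", [], [])))
    assert all(order.index(u) < order.index(v) for u, vs in edges.items() for v in vs)
    print(order)  # e.g. ['a', 'b', 'e', 'd', 'c']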