# Tests for the PyTorch CTRL model: a model tester, the common test suite, and a slow integration test.
import gc
import unittest

from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )


class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))


@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
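The slow integration test above doubles as a usage recipe. A stripped-down sketch (the prompt text is illustrative, and the "ctrl" checkpoint is large, so this is slow to run):

import torch
from transformers import CTRLLMHeadModel, CTRLTokenizer

tokenizer = CTRLTokenizer.from_pretrained("ctrl")
model = CTRLLMHeadModel.from_pretrained("ctrl")

# CTRL prompts start with a control code, e.g. "Legal"
input_ids = tokenizer("Legal the president is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, do_sample=False)
print(tokenizer.decode(output_ids[0]))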
"""Utilities to extract model / tester / test-class relationships from a model test file."""
import importlib
import os
import sys


# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Import and return the module corresponding to a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file with a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` attributes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """
    Make the information succinct and easy to read.

    Avoid the full class representation like `<class 'transformers.models.bert.modeling_bert.BertModel'>` when
    displaying the results. Instead, use `BertModel` (the class name) only.
    """
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
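A sketch of how these helpers fit together, assuming the file above is importable as `get_test_info` and you run from the repo root (the BERT test file path is just an example):

from get_test_info import get_model_to_tester_mapping, to_json

test_file = "tests/models/bert/test_modeling_bert.py"
print(to_json(get_model_to_tester_mapping(test_file)))
# e.g. {"BertModel": ["BertModelTester"], ...}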
def solution(limit: int = 1000000) -> int:
    """
    Count how many n < `limit` have exactly ten solutions to
    x**2 - y**2 - z**2 == n, where x > y > z > 0 are integers in
    arithmetic progression (x = a + d, y = a, z = a - d).
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers: z > 0 needs a > d, and n > 0 needs a < 4d
                    frequency[n] += 1
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
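For reference (this note is not part of the original file): writing the progression as x = a + d, y = a, z = a - d gives n = x**2 - y**2 - z**2 = a(4d - a), so for each a dividing a candidate n, d = (a + n/a)/4 must be a positive integer with a > d and a < 4d. This looks like Project Euler problem 135, whose statement gives n = 1155 as the least value with exactly ten solutions; a brute-force sanity check of that fact (the function name and search limit are illustrative):

def brute_force_count(n, search_limit=2000):
    """Count AP solutions (x, y, z) = (a + d, a, a - d) with x**2 - y**2 - z**2 == n."""
    count = 0
    for a in range(1, search_limit):
        for d in range(1, a):  # d < a keeps z = a - d positive
            if (a + d) ** 2 - a**2 - (a - d) ** 2 == n:
                count += 1
    return count


assert brute_force_count(1155) == 10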
# Image processor for MobileViT-style models: resize -> center crop -> rescale -> RGB/BGR channel flip.
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    flip_channel_order,
    get_resize_output_image_size,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


if is_vision_available():
    import PIL

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge of `image` matches `size["shortest_edge"]`, keeping the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop `image` to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        """Flip the color channels between RGB and BGR order."""
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model outputs into semantic segmentation maps, optionally resized to `target_sizes`."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
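A minimal usage sketch for the processor above (the random image is a stand-in for a real one, and the output shape assumes the default 256x256 crop):

import numpy as np
from PIL import Image

processor = MobileViTImageProcessor()
image = Image.fromarray(np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8))
batch = processor.preprocess(image, return_tensors="pt")
print(batch["pixel_values"].shape)  # e.g. torch.Size([1, 3, 256, 256])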
"""
A pure Python implementation of the patience sort algorithm: deal elements onto
piles (each pile kept in decreasing order), then merge the piles with a
heap-based k-way merge.
"""
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    """A pile of cards; piles compare by their top element."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """Sort `collection` in place (ascending) and return it.

    >>> patience_sort([1, 9, 5, 21, 17, 6])
    [1, 5, 6, 9, 17, 21]
    """
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
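One well-known property worth noting (a side note, not part of the original file): the number of piles that patience dealing creates equals the length of a longest increasing subsequence of the input. A small sketch that re-runs just the pile-dealing phase, tracking only the pile tops:

from bisect import bisect_left


def pile_count(values) -> int:
    """Deal values onto piles exactly as patience_sort does and count the piles."""
    tops = []  # top card of each pile, kept sorted
    for value in values:
        i = bisect_left(tops, value)
        if i == len(tops):
            tops.append(value)
        else:
            tops[i] = value
    return len(tops)


assert pile_count([1, 9, 5, 21, 17, 6]) == 3  # one LIS is [1, 5, 17], length 3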
"""PyTorch Lightning callbacks and callback factories for the seq2seq/RAG training scripts."""
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
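A minimal sketch of how these callbacks would be wired into a Trainer (the output directory and module names are illustrative):

import pytorch_lightning as pl

output_dir = "outputs"  # illustrative
trainer = pl.Trainer(
    callbacks=[
        Seq2SeqLoggingCallback(),
        get_checkpoint_callback(output_dir, "bleu"),
        get_early_stopping_callback("bleu", patience=3),
    ],
)
# trainer.fit(model_module)  # model_module: your pl.LightningModule that logs val_bleu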
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
snake_case_ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
inspect_dataset(lowercase_ , lowercase_ )
UpperCAmelCase = path + '.py'
assert script_name in os.listdir(lowercase_ )
assert "__pycache__" not in os.listdir(lowercase_ )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
inspect_metric(lowercase_ , lowercase_ )
UpperCAmelCase = path + '.py'
assert script_name in os.listdir(lowercase_ )
assert "__pycache__" not in os.listdir(lowercase_ )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = get_dataset_config_info(lowercase_ , config_name=lowercase_ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
with pytest.raises(lowercase_ ):
get_dataset_config_info(lowercase_ , config_name=lowercase_ )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = get_dataset_config_names(lowercase_ )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = get_dataset_infos(lowercase_ )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase = expected_configs[0]
assert expected_config in infos
UpperCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = get_dataset_infos(lowercase_ )
assert expected_config in infos
UpperCAmelCase = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
with pytest.raises(lowercase_ ):
get_dataset_split_names(lowercase_ , config_name=lowercase_ )
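The same inspection helpers can be used directly; a quick interactive sketch (network access and the public "squad" dataset are assumed):

from datasets import get_dataset_config_names, get_dataset_split_names

print(get_dataset_config_names("squad"))  # ['plain_text']
print(get_dataset_split_names("squad", "plain_text"))  # ['train', 'validation']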
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case_ = get_tests_dir("""fixtures/dummy-config.json""")
class A_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self :int ) -> Optional[Any]:
UpperCAmelCase = 0
def UpperCAmelCase__ ( self :List[str] ) -> str:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def UpperCAmelCase__ ( self :List[Any] ) -> List[str]:
UpperCAmelCase = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> int:
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :int ) -> Any:
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] ) -> List[str]:
UpperCAmelCase = AutoConfig.for_model('roberta' )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCAmelCase__ ( self :str ) -> List[str]:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
UpperCAmelCase = os.path.join(lowercase_ , 'fake-roberta' )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with open(os.path.join(lowercase_ , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(type(lowercase_ ) , lowercase_ )
def UpperCAmelCase__ ( self :int ) -> Union[str, Any]:
try:
AutoConfig.register('custom' , lowercase_ )
# Wrong model type will raise an error
with self.assertRaises(lowercase_ ):
AutoConfig.register('model' , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoConfig.register('bert' , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ )
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def UpperCAmelCase__ ( self :str ) -> Dict:
with self.assertRaisesRegex(
lowercase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
UpperCAmelCase = AutoConfig.from_pretrained('bert-base' )
def UpperCAmelCase__ ( self :List[Any] ) -> Optional[int]:
with self.assertRaisesRegex(
lowercase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ , revision='aaaaaa' )
def UpperCAmelCase__ ( self :List[str] ) -> str:
with self.assertRaisesRegex(
lowercase_ , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def UpperCAmelCase__ ( self :str ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowercase_ )
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowercase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ )
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ , trust_remote_code=lowercase_ )
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )
def UpperCAmelCase__ ( self :List[Any] ) -> Optional[int]:
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = """new-model"""
try:
AutoConfig.register('new-model' , lowercase_ )
# If remote code is not set, the default is to use local
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowercase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
UpperCAmelCase = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=lowercase_ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
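Outside of tests, the registration pattern exercised above looks like this (the "my-model" type and MyConfig class are illustrative; note that the registered model type must match the config's model_type, which is exactly what the "Wrong model type" assertion checks):

from transformers import AutoConfig, PretrainedConfig


class MyConfig(PretrainedConfig):
    model_type = "my-model"


AutoConfig.register("my-model", MyConfig)
config = AutoConfig.for_model("my-model")
assert isinstance(config, MyConfig)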
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowercase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __snake_case ( a__ ):
a__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 2_55 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(**lowercase)
a__: Tuple = size if size is not None else {'shortest_edge': 2_24}
a__: str = get_size_dict(lowercase , default_to_square=lowercase)
a__: str = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
a__: Any = get_size_dict(lowercase , default_to_square=lowercase , param_name='crop_size')
a__: Optional[Any] = do_resize
a__: Optional[Any] = size
a__: Dict = resample
a__: List[str] = do_center_crop
a__: Dict = crop_size
a__: int = do_rescale
a__: Tuple = rescale_factor
a__: Dict = do_normalize
a__: Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a__: str = image_std if image_std is not None else OPENAI_CLIP_STD
a__: str = do_convert_rgb
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> Optional[Any]:
'''simple docstring'''
a__: str = get_size_dict(lowercase , default_to_square=lowercase)
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
a__: Optional[Any] = get_resize_output_image_size(lowercase , size=size['shortest_edge'] , default_to_square=lowercase)
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> Optional[Any]:
'''simple docstring'''
a__: List[Any] = get_size_dict(lowercase)
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
return center_crop(lowercase , size=(size['height'], size['width']) , data_format=lowercase , **lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> Dict:
'''simple docstring'''
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> Any:
'''simple docstring'''
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> Dict:
'''simple docstring'''
a__: Union[str, Any] = do_resize if do_resize is not None else self.do_resize
a__: Optional[int] = size if size is not None else self.size
a__: Tuple = get_size_dict(lowercase , param_name='size' , default_to_square=lowercase)
a__: Union[str, Any] = resample if resample is not None else self.resample
a__: int = do_center_crop if do_center_crop is not None else self.do_center_crop
a__: Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a__: Tuple = get_size_dict(lowercase , param_name='crop_size' , default_to_square=lowercase)
a__: int = do_rescale if do_rescale is not None else self.do_rescale
a__: int = rescale_factor if rescale_factor is not None else self.rescale_factor
a__: List[Any] = do_normalize if do_normalize is not None else self.do_normalize
a__: List[str] = image_mean if image_mean is not None else self.image_mean
a__: int = image_std if image_std is not None else self.image_std
a__: Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a__: Optional[Any] = make_list_of_images(lowercase)
if not valid_images(lowercase):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a__: Optional[Any] = [convert_to_rgb(lowercase) for image in images]
# All transformations expect numpy arrays.
a__: Optional[int] = [to_numpy_array(lowercase) for image in images]
if do_resize:
a__: Optional[int] = [self.resize(image=lowercase , size=lowercase , resample=lowercase) for image in images]
if do_center_crop:
a__: Dict = [self.center_crop(image=lowercase , size=lowercase) for image in images]
if do_rescale:
a__: int = [self.rescale(image=lowercase , scale=lowercase) for image in images]
if do_normalize:
a__: List[Any] = [self.normalize(image=lowercase , mean=lowercase , std=lowercase) for image in images]
a__: List[Any] = [to_channel_dimension_format(lowercase , lowercase) for image in images]
a__: Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=lowercase , tensor_type=lowercase)
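A minimal usage sketch for the CLIP-style processor above (the random array stands in for a real image; the shape comment assumes the default 224 shortest edge and 224x224 crop):

import numpy as np
from PIL import Image

processor = CLIPImageProcessor()
image = Image.fromarray(np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8))
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): resized, cropped, rescaled, normalized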
"""Crawl public Instagram profile information for a given username."""
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_bert"] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_bert"] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_bert"] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
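For context, the `_LazyModule` pattern above defers the heavy submodule imports until an exported name is first accessed. A rough sketch of the idea (simplified; this is not the actual transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Import a submodule only when one of its exported names is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # map each exported name back to the submodule that defines it
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value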
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """
        Inserts a list of words into the Trie
        :param words: list of string words
        :return: None
        """
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """
        Inserts a word into the Trie
        :param word: word to be inserted
        :return: None
        """
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """
        Tries to find word in a Trie
        :param word: word to look for
        :return: Returns True if word is found, False otherwise
        """
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """
        Deletes a word in a Trie
        :param word: word to delete
        :return: None
        """

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    """
    Prints all the words in a Trie
    :param node: root node of Trie
    :param word: word variable should be empty at start
    :return: None
    """
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCamelCase ( UpperCamelCase__ ):
def __init__( self) -> Any:
self.test()
def a_ ( self) -> Any:
snake_case_ = 0
snake_case_ = False
while not completed:
if counter == 1:
self.reset()
snake_case_ = self.advance()
if not self.does_advance(__lowerCamelCase):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.')
snake_case_ = self.update(__lowerCamelCase)
counter += 1
if counter > 1_0000:
raise Exception('update() does not fulfill the constraint.')
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.')
@abstractmethod
def a_ ( self) -> Tuple:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def a_ ( self, lowerCAmelCase__) -> Union[str, Any]:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def a_ ( self, lowerCAmelCase__) -> List[Any]:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def a_ ( self) -> Any:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def a_ ( self) -> List[Any]:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
@abstractmethod
def a_ ( self, lowerCAmelCase__=False) -> Any:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
class UpperCamelCase ( UpperCamelCase__ ):
def __init__( self, lowerCAmelCase__) -> Optional[int]:
super(__lowerCamelCase, self).__init__()
if not isinstance(__lowerCamelCase, __lowerCamelCase) or len(__lowerCamelCase) == 0:
raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.')
if any((not isinstance(__lowerCamelCase, __lowerCamelCase) or token_id < 0) for token_id in token_ids):
raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.')
snake_case_ = token_ids
snake_case_ = len(self.token_ids)
snake_case_ = -1 # the index of the currently fulfilled step
snake_case_ = False
def a_ ( self) -> List[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def a_ ( self, lowerCAmelCase__) -> int:
if not isinstance(__lowerCamelCase, __lowerCamelCase):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(__lowerCamelCase)}')
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def a_ ( self, lowerCAmelCase__) -> Optional[int]:
if not isinstance(__lowerCamelCase, __lowerCamelCase):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(__lowerCamelCase)}')
snake_case_ = False
snake_case_ = False
snake_case_ = False
if self.does_advance(__lowerCamelCase):
self.fulfilled_idx += 1
snake_case_ = True
if self.fulfilled_idx == (self.seqlen - 1):
snake_case_ = True
snake_case_ = completed
else:
# failed to make progress.
snake_case_ = True
self.reset()
return stepped, completed, reset
def a_ ( self) -> Dict:
snake_case_ = False
snake_case_ = 0
def a_ ( self) -> str:
return self.seqlen - (self.fulfilled_idx + 1)
def a_ ( self, lowerCAmelCase__=False) -> Tuple:
snake_case_ = PhrasalConstraint(self.token_ids)
if stateful:
snake_case_ = self.seqlen
snake_case_ = self.fulfilled_idx
snake_case_ = self.completed
return new_constraint
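# A quick illustration of the PhrasalConstraint state machine (token ids are illustrative;
# kept as comments since this sits at module level):
#
#     constraint = PhrasalConstraint([5, 9, 2])
#     constraint.advance()   # 5 -> the next token that makes progress
#     constraint.update(5)   # (True, False, False): stepped, not yet completed
#     constraint.update(9)   # (True, False, False)
#     constraint.update(2)   # (True, True, False): phrase fully generated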
class DisjunctiveTrie:
    def __init__(self, nested_token_ids, no_subsets=True):
        """A helper class that builds a trie with the words represented in `nested_token_ids`."""
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}.")

        self.trie = root

    def next_tokens(self, current_seq):
        """The next tokens that can progress the trie, given the tokens consumed so far in `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns whether # of leaves == # of words; if not, some word is a complete subset of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """Constraint that is fulfilled once any one of several candidate token sequences is generated."""

    def __init__(self, nested_token_ids):
        super().__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.")

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    """A class for beam scorers to track their progress through a list of constraints."""

    def __init__(self, constraints):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to the completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of
            # our list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true.")

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must
                            # be complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
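
# Quick sanity-check sketch of the constraint API above (the token ids are
# made-up integers, not from any real tokenizer):
if __name__ == "__main__":
    phrase = PhrasalConstraint([5, 9, 3])
    print(phrase.advance())  # 5 -> the next token that makes progress
    print(phrase.update(5))  # (True, False, False): stepped, not completed, no reset
    state = ConstraintListState([DisjunctiveConstraint([[1, 2], [1, 3, 4]])])
    print(state.advance())  # [1] -> only token 1 enters the trie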
| 69 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    """Configuration class to store the configuration of a PEGASUS model."""

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
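
# Example (sketch): `attribute_map` above aliases the generic names onto the
# Pegasus-specific attributes, so both spellings resolve to the same values.
if __name__ == "__main__":
    config = PegasusConfig()
    print(config.hidden_size, config.num_attention_heads)  # 1024 16 (d_model / encoder_attention_heads)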
| 149 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    """Copy the original HiFi-GAN generator weights into the HF model (weight-norm parameters included)."""
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
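# Example invocation (sketch; the file names are placeholders for the original
# HiFi-GAN generator artifacts, not files shipped with this script):
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan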
| 367 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 176 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds)

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer  # the layer at which the forward pass exited early
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
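
# Sketch of how the highway exits returned above can drive entropy-based early
# exit at inference time. The 0.3 threshold is an illustrative assumption, not a
# value from this file:
#
#     for layer_idx, highway_exit in enumerate(all_highway_exits):
#         if entropy(highway_exit[0]) < 0.3:  # hypothetical confidence threshold
#             return highway_exit[0]  # use this layer's logits and skip the rest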
| 51 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    """Constructs a FLAVA processor which wraps a FLAVA image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        """Uses the image processor for images and the tokenizer for text; merges both outputs when given both."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
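
# Minimal usage sketch (the checkpoint id is the public FLAVA release; the image
# file name is a placeholder):
#
#     from PIL import Image
#     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#     inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")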
| 51 | 1 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image: pixels above the global mean become 255, the rest become 0."""
    height, width = image.size
    mean = 0
    pixels = image.load()

    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 160 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 160 | 1 |
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    """Count the Lychrel candidates below `limit` (Project Euler problem 55)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
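
# Worked example: 349 is not a Lychrel candidate, since it reaches a palindrome
# within three iterations:
#   349 + 943 = 1292; 1292 + 2921 = 4213; 4213 + 3124 = 7337 (a palindrome).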
if __name__ == "__main__":
print(F"""{solution() = }""")
| 181 |
import unittest

from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type)

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 181 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Image To Text pipeline using a vision-to-sequence model: predicts a caption for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
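
# Minimal usage sketch (the checkpoint id is an assumption; any vision-to-text
# model on the hub works here):
#
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     print(captioner("parrots.png"))  # [{"generated_text": "..."}]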
| 339 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sorts `sequence` in place with the deliberately inefficient slowsort ("multiply and surrender") algorithm."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
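
# Example (sketch): slowsort works in place on a mutable list.
#   data = [5, 2, 9, 1]
#   slowsort(data)
#   print(data)  # [1, 2, 5, 9]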
| 339 | 1 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
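
# Minimal usage sketch of the processor under test (the input array is an
# illustrative dummy image, height x width x channels):
#
#     import numpy as np
#     from transformers import DPTImageProcessor
#     processor = DPTImageProcessor(size={"height": 18, "width": 18})
#     pixel_values = processor(np.zeros((30, 30, 3), dtype=np.uint8), return_tensors="pt").pixel_values
#     print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])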
| 192 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
snake_case_ = None
if token is not None:
snake_case_ = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
snake_case_ = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
snake_case_ = requests.get(_A , headers=_A ).json()
snake_case_ = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
snake_case_ = math.ceil((result["total_count"] - 100) / 100 )
for i in range(_A ):
snake_case_ = requests.get(url + f"&page={i + 2}" , headers=_A ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
snake_case_ = None
if token is not None:
snake_case_ = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
snake_case_ = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
snake_case_ = requests.get(_A , headers=_A ).json()
snake_case_ = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
snake_case_ = math.ceil((result["total_count"] - 100) / 100 )
for i in range(_A ):
snake_case_ = requests.get(url + f"&page={i + 2}" , headers=_A ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def lowerCamelCase__ ( _A , _A , _A , _A ):
'''simple docstring'''
snake_case_ = None
if token is not None:
snake_case_ = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
snake_case_ = requests.get(_A , headers=_A , allow_redirects=_A )
snake_case_ = result.headers["Location"]
snake_case_ = requests.get(_A , allow_redirects=_A )
snake_case_ = os.path.join(_A , f"{artifact_name}.zip" )
with open(_A , "wb" ) as fp:
fp.write(response.content )
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
snake_case_ = []
snake_case_ = []
snake_case_ = None
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_A ) as f:
for line in f:
snake_case_ = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case_ = line[: line.index(": " )]
snake_case_ = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
snake_case_ = line[len("FAILED " ) :]
failed_tests.append(_A )
elif filename == "job_name.txt":
snake_case_ = line
if len(_A ) != len(_A ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(_A )} for `errors` "
f"and {len(_A )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
" problem." )
snake_case_ = None
if job_name and job_links:
snake_case_ = job_links.get(_A , _A )
# A list with elements of the form (line of error, error, failed test)
snake_case_ = [x + [y] + [job_link] for x, y in zip(_A , _A )]
return result
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
snake_case_ = []
snake_case_ = [os.path.join(_A , _A ) for p in os.listdir(_A ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_A , job_links=_A ) )
return errors
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
snake_case_ = Counter()
counter.update([x[1] for x in logs] )
snake_case_ = counter.most_common()
snake_case_ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case_ = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
snake_case_ = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = test.split("::" )[0]
if test.startswith("tests/models/" ):
snake_case_ = test.split("/" )[2]
else:
snake_case_ = None
return test
def lowerCamelCase__ ( _A , _A=None ):
'''simple docstring'''
snake_case_ = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case_ = [x for x in logs if x[2] is not None]
snake_case_ = {x[2] for x in logs}
snake_case_ = {}
for test in tests:
snake_case_ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case_ = counter.most_common()
snake_case_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case_ = sum(error_counts.values() )
if n_errors > 0:
snake_case_ = {"count": n_errors, "errors": error_counts}
snake_case_ = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = "| no. | error | status |"
snake_case_ = "|-:|:-|:-|"
snake_case_ = [header, sep]
for error in reduced_by_error:
snake_case_ = reduced_by_error[error]["count"]
snake_case_ = f"| {count} | {error[:100]} | |"
lines.append(_A )
return "\n".join(_A )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = "| model | no. of errors | major error | count |"
snake_case_ = "|-:|-:|-:|-:|"
snake_case_ = [header, sep]
for model in reduced_by_model:
snake_case_ = reduced_by_model[model]["count"]
snake_case_ , snake_case_ = list(reduced_by_model[model]["errors"].items() )[0]
snake_case_ = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(_A )
return "\n".join(_A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
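
# A minimal invocation sketch (the script filename and run id are illustrative;
# the token needs `actions:read` permission on the repository):
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 2990214946 \
#       --output_dir ci_artifacts \
#       --token "$GITHUB_TOKEN"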
| 187 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase__ : int = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))
    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 351 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
    # can't train a new tokenizer via the tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 180 | 0 |
"""
Project Euler Problem 92: https://projecteuler.net/problem=92

A number chain is created by continuously adding the square of the digits of a
number to form a new number, until it has been seen before. Every starting
number eventually arrives at 1 or 89. How many starting numbers below ten
million will arrive at 89?
"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number(number: int) -> int:
    """Return the next chain member: the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared
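
# For example, next_number(123_456) looks the digits up five at a time:
# DIGITS_SQUARED[23_456] + DIGITS_SQUARED[1] == 90 + 1 == 91,
# which equals 1**2 + 2**2 + 3**2 + 4**2 + 5**2 + 6**2.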
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
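# For example: 44 -> 32 -> 13 -> 10 -> 1 -> 1 ... and
#              85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 -> ...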
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain(number: int) -> bool:
    """Follow the chain from `number`; True means it ends at 1, False that it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # number * 10, number * 100, ... share the same chain (a trailing zero adds nothing),
    # so cache the result for them as well
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
def solution(number: int = 10_000_000) -> int:
    """Return how many starting numbers below `number` produce a chain that arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''')
| 151 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__snake_case = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)
    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image
    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
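
# A minimal usage sketch (input sizes are illustrative): with the default
# size_divisor=32, a 475x640 image is rounded down to 448x640 (475 // 32 * 32 == 448).
#
#   import numpy as np
#   processor = GLPNImageProcessor()
#   image = np.random.randint(0, 256, (475, 640, 3), dtype=np.uint8)
#   batch = processor.preprocess(image, return_tensors="np")
#   batch["pixel_values"][0].shape  # (3, 448, 640), channels first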
| 176 | 0 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 84 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
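
# A minimal usage sketch (this class corresponds to diffusers' DDIMPipeline; the
# checkpoint name is one public example, not a requirement):
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(num_inference_steps=50, eta=0.0).images[0]
#   image.save("sample.png")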
| 84 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 160 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    config = SwinvaConfig()
    name_split = swinva_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
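
# For example, a timm parameter inside a transformer block is renamed as
# "layers.0.blocks.0.attn.proj.weight"
#   -> "swinv2.encoder.layers.0.blocks.0.attention.output.dense.weight".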
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()

    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_swinva_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
 | 160 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)
    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)
    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")
    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")
    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")

            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")

            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
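
# A minimal registration sketch (`my-model` and MyConfig are hypothetical, shown
# only to illustrate the AutoConfig.register API exercised above):
#
#   from transformers import AutoConfig, PretrainedConfig
#
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#
#   AutoConfig.register("my-model", MyConfig)
#   assert isinstance(AutoConfig.for_model("my-model"), MyConfig)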
| 367 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we tracked all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """
    Fake wrapper for RegNet that mimics what vissl does without the need to pass a config file.
    """

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x, out_feat_keys=None, feature_blocks=self._feature_blocks, )
class NameToFromModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the function that creates the correct original model.
    """

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val


class NameToOurModelFuncMap(dict):
    """
    A dictionary with some additional logic to return the correct Hugging Face RegNet class reference.
    """

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification

        return val
def manually_copy_vissl_head(from_state_dict: Dict, to_state_dict: Dict, keys: List[Tuple[str, str]]) -> Dict:
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=True, )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=True, )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()), )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()), )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()), )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY32gf()), )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY64gf()), )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetY128gf()), )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))), )
    if model_name:
        convert_weight_and_push(
            model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], save_directory, push_to_hub, )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], config, save_directory, push_to_hub, )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 71 | 0 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes over the odd numbers; returns all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(ceiling: int = 1_000_000) -> int:
    """Return the largest prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest
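
# For example, below one hundred the longest such run is 2 + 3 + 5 + 7 + 11 + 13 = 41
# (six consecutive primes), and below one thousand it is 953, a run of 21 primes.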
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339 |
from __future__ import annotations
def all_unique(nums: list[int]) -> bool:
    """
    Return True if every element of `nums` is distinct.

    >>> all_unique([1, 2, 3, 4])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(nums)) == len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 339 | 1 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Tuple = self.to_json_kwargs.pop('''path_or_buf''' , UpperCamelCase_ )
lowerCAmelCase : str = self.to_json_kwargs.pop('''orient''' , '''records''' )
lowerCAmelCase : List[str] = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
lowerCAmelCase : Dict = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
lowerCAmelCase : Optional[Any] = self.to_json_kwargs.pop('''compression''' , UpperCamelCase_ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=UpperCamelCase_ ) as buffer:
lowerCAmelCase : Optional[int] = self._write(file_obj=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
''' was passed. Please provide a local path instead.''' )
lowerCAmelCase : List[str] = self._write(
file_obj=self.path_or_buf , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs )
return written
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str ):
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : int = args
lowerCAmelCase : Union[str, Any] = query_table(
table=self.dataset.data , key=slice(UpperCamelCase_ , offset + self.batch_size ) , indices=self.dataset._indices , )
lowerCAmelCase : int = batch.to_pandas().to_json(
path_or_buf=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **UpperCamelCase_ )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : BinaryIO , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[Any] , ):
lowerCAmelCase : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
lowerCAmelCase : Optional[Any] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(UpperCamelCase_ )
else:
lowerCAmelCase, lowerCAmelCase : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , UpperCamelCase_ , UpperCamelCase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(UpperCamelCase_ )
return written
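# Hedged usage sketch (file names are placeholders, not part of the module):
#     ds = JsonDatasetReader("data.jsonl").read()
#     JsonDatasetWriter(ds, "out.jsonl", batch_size=1_000).write()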
| 314 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 314 | 1 |
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
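# The loop above follows the Project Euler 234 ("semidivisible numbers") scheme:
# for consecutive primes p < q, the numbers between p**2 and q**2 divisible by
# exactly one of p, q are summed; multiples of p * q were added by both passes,
# so they are subtracted twice.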
if __name__ == "__main__":
print(solution())
| 19 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
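# These tests start a local Spark session; they can be run directly with, e.g.
# (the path is an assumption): pytest tests/packaged_modules/test_spark.py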
| 180 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))


@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices


@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out


class ChunkSizeTuner:
    def __init__(
        self,
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
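# A small usage sketch for chunk_layer (the toy layer and shapes are hypothetical):
#     layer = lambda x: {"out": x * 2}     # any callable taking keyword tensors
#     x = torch.randn(4, 8, 16)            # batch dims (4, 8), feature dim 16
#     y = chunk_layer(layer, {"x": x}, chunk_size=8, no_batch_dims=2)
#     assert y["out"].shape == (4, 8, 16)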
| 365 |
import torch
from diffusers import StableDiffusionPipeline
_lowercase : Optional[int] ="path-to-your-trained-model"
_lowercase : Union[str, Any] =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
_lowercase : Any ="A photo of sks dog in a bucket"
_lowercase : Any =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 266 | 0 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Parse the JSON payload of an Instagram profile page <script> tag into the user dict."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and extract the user data dict."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
| 84 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental Sieve of Eratosthenes: lazily yields every prime in order."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd n for which the prime-square remainder 2 * n * p_n exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
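# For the default limit of 1e10 this should print 21035 (the published Project
# Euler 123 answer); treat that value as a hedged cross-check, not a spec.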
| 84 | 1 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 258 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California housing dataset and split it
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 258 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute Gamma(num) by numerically integrating x**(num - 1) * exp(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
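# Sanity values (mathematical facts, not captured program output):
#     gamma(5)   ~= 24.0           # Gamma(n) == (n - 1)! for positive integers
#     gamma(0.5) ~= 1.7724538509   # sqrt(pi)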
| 149 |
def pancake_sort(arr: list) -> list:
    """Sort `arr` using pancake sort (prefix reversals) and return the result."""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
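# Example: pancake_sort([3, 1, 2]) -> [1, 2, 3]. Each pass uses at most two
# prefix reversals, giving O(n) flips and O(n**2) comparisons overall.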
| 71 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 371 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 82 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
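# Hedged usage sketch (model id is a placeholder; RePaint expects an unconditional
# DDPM UNet plus a RePaintScheduler):
#     scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#     pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#     out = pipe(image=init_image, mask_image=mask, num_inference_steps=250)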
| 314 |
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
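# Worked example (follows directly from the definitions above):
#     maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
#     -> [[ 6.,  8.],
#         [14., 16.]]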
| 314 | 1 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
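# Example: get_device_map(8, [0, 1]) -> {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}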
| 350 |
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Benchmark the two implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
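# Atbash maps A<->Z, B<->Y, ... and is therefore its own inverse:
#     atbash("ABCDEFGH") -> "ZYXWVUTS", and atbash(atbash(s)) == s for any s.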
| 259 | 0 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 175 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
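# Hedged usage sketch (requires torchaudio; input is one second of random audio):
#     fe = Speech2TextFeatureExtractor()
#     out = fe(np.random.randn(16_000).astype(np.float32), sampling_rate=16_000, return_tensors="np")
#     out["input_features"].shape  # (1, num_frames, 80)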
| 266 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve for whichever of inductance, frequency, or inductive reactance is passed as 0."""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
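# Example (X_L = 2 * pi * f * L):
#     ind_reactance(35e-3, 1000, 0) -> {'reactance': 219.911...}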
| 95 | """simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 95 | 1 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __a ( UpperCAmelCase , UpperCAmelCase = "cpu" , UpperCAmelCase = None ) ->None:
"""simple docstring"""
A = torch.load(UpperCAmelCase , map_location=UpperCAmelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(UpperCAmelCase , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
A = v.half()
if save_path is None: # overwrite src_path
A = src_path
torch.save(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
fire.Fire(convert)
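# Fire exposes `convert` as a CLI; invocation is roughly (the script name is a
# placeholder for this file):
#     python fp16_convert.py path/to/pytorch_model.bin --save_path model.fp16.bin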
| 258 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)
        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)
        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("""input_data""" , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
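# Behaviour illustration (hedged sketch of what the tests above assert), using
# the module's add_one helper: map_nested applies a function to every leaf
# while preserving the container structure, e.g.
#
#   map_nested(add_one, {"a": [1, 2], "b": 3})  # -> {"a": [2, 3], "b": 4}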
| 258 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
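# Minimal usage sketch (hedged; the model class and dimensions below are
# assumptions for illustration only):
#
#   from transformers import DecisionTransformerModel
#   config = DecisionTransformerConfig(state_dim=17, act_dim=6, hidden_size=128)
#   model = DecisionTransformerModel(config)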
| 356 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Any:
lowerCamelCase_ = "this is a test"
lowerCamelCase_ = "this is a test"
return input_text, output_text
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = "<pad>"
lowerCamelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "▁eloquent" )
self.assertEqual(len(lowercase ) , 30000 )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = "I was born in 92000, and this is falsé."
lowerCamelCase_ = tokenizer.tokenize(lowercase )
lowerCamelCase_ = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
lowerCamelCase_ = tokenizer.encode(lowercase , add_special_tokens=lowercase )
lowerCamelCase_ = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(lowercase )
lowerCamelCase_ = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
lowerCamelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase , ["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [48, 25, 21, 1289] )
lowerCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(lowercase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
lowerCamelCase_ = tokenizer.encode("sequence builders" )
lowerCamelCase_ = tokenizer.encode("multi-sequence build" )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowercase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
# fmt: off
lowerCamelCase_ = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
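# Usage sketch outside the test harness (hedged; this downloads the pretrained
# vocab from the Hub rather than using the local fixture):
#
#   tok = AlbertTokenizer.from_pretrained("albert-base-v2")
#   ids = tok.encode("sequence builders")  # wrapped as [CLS] ... [SEP]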
| 47 | 0 |
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError('days_between_payments must be > 0')
    if daily_interest_rate < 0:
        raise ValueError('daily_interest_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * daily_interest_rate * days_between_payments


def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('number_of_compounding_periods must be > 0')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('nominal_annual_interest_rate_percentage must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
    if number_of_years <= 0:
        raise ValueError('number_of_years must be > 0')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('nominal_annual_percentage_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
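# Worked example (hedged): simple_interest(10_000, 0.05, 10) computes
# 10_000 * 0.05 * 10, i.e. roughly 5_000.0 of interest (compare with
# math.isclose in real code to avoid float-rounding surprises), and
# apr_interest simply delegates to compound_interest with a daily rate over
# number_of_years * 365 periods.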
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 86 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
_lowerCAmelCase = Path(self.output_dir ) / """metrics.json"""
_lowerCAmelCase = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
_lowerCAmelCase = 0
_lowerCAmelCase = defaultdict(_snake_case )
_lowerCAmelCase = self.config.model_type
_lowerCAmelCase = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
_lowerCAmelCase = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
_lowerCAmelCase = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
_lowerCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_lowerCAmelCase = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
_lowerCAmelCase = get_git_info()["""repo_sha"""]
_lowerCAmelCase = hparams.num_workers
_lowerCAmelCase = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _snake_case ):
_lowerCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_lowerCAmelCase = self.decoder_start_token_id
_lowerCAmelCase = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
_lowerCAmelCase = False
_lowerCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_lowerCAmelCase = self.hparams.eval_max_gen_length
else:
_lowerCAmelCase = self.model.config.max_length
_lowerCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(_snake_case , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
_lowerCAmelCase = True
return readable_batch
def snake_case ( self , _snake_case , **_snake_case ):
"""simple docstring"""
return self.model(_snake_case , **_snake_case )
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.tokenizer.batch_decode(
_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case )
return lmap(str.strip , _snake_case )
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.tokenizer.pad_token_id
_lowerCAmelCase , _lowerCAmelCase = batch["""input_ids"""], batch["""attention_mask"""]
_lowerCAmelCase = batch["""labels"""]
if isinstance(self.model , _snake_case ):
_lowerCAmelCase = self.model._shift_right(_snake_case )
else:
_lowerCAmelCase = shift_tokens_right(_snake_case , _snake_case )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
_lowerCAmelCase = decoder_input_ids
self.save_readable_batch(_snake_case )
_lowerCAmelCase = self(_snake_case , attention_mask=_snake_case , decoder_input_ids=_snake_case , use_cache=_snake_case )
_lowerCAmelCase = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
_lowerCAmelCase = nn.CrossEntropyLoss(ignore_index=_snake_case )
assert lm_logits.shape[-1] == self.vocab_size
_lowerCAmelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
_lowerCAmelCase = nn.functional.log_softmax(_snake_case , dim=-1 )
_lowerCAmelCase , _lowerCAmelCase = label_smoothed_nll_loss(
_snake_case , _snake_case , self.hparams.label_smoothing , ignore_index=_snake_case )
return (loss,)
@property
def snake_case ( self ):
"""simple docstring"""
return self.tokenizer.pad_token_id
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = self._step(_snake_case )
_lowerCAmelCase = dict(zip(self.loss_names , _snake_case ) )
# tokens per batch
_lowerCAmelCase = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
_lowerCAmelCase = batch["""input_ids"""].shape[0]
_lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).sum()
_lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
return self._generative_step(_snake_case )
def snake_case ( self , _snake_case , _snake_case="val" ):
"""simple docstring"""
self.step_count += 1
_lowerCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_lowerCAmelCase = losses["""loss"""]
_lowerCAmelCase = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
_lowerCAmelCase = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_lowerCAmelCase = torch.tensor(_snake_case ).type_as(_snake_case )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_snake_case )
_lowerCAmelCase = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
_lowerCAmelCase = self.step_count
self.metrics[prefix].append(_snake_case ) # callback writes this to self.metrics_save_path
_lowerCAmelCase = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'{prefix}_loss': loss,
F'{prefix}_{self.val_metric}': metric_tensor,
}
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
return calculate_rouge(_snake_case , _snake_case )
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_lowerCAmelCase = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=_snake_case , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
_lowerCAmelCase = (time.time() - ta) / batch["""input_ids"""].shape[0]
_lowerCAmelCase = self.ids_to_clean_text(_snake_case )
_lowerCAmelCase = self.ids_to_clean_text(batch["""labels"""] )
_lowerCAmelCase = self._step(_snake_case )
_lowerCAmelCase = dict(zip(self.loss_names , _snake_case ) )
_lowerCAmelCase = self.calc_generative_metrics(_snake_case , _snake_case )
_lowerCAmelCase = np.mean(lmap(_snake_case , _snake_case ) )
base_metrics.update(gen_time=_snake_case , gen_len=_snake_case , preds=_snake_case , target=_snake_case , **_snake_case )
return base_metrics
def snake_case ( self , _snake_case , _snake_case ):
"""simple docstring"""
return self._generative_step(_snake_case )
def snake_case ( self , _snake_case ):
"""simple docstring"""
return self.validation_epoch_end(_snake_case , prefix="""test""" )
def snake_case ( self , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.n_obs[type_path]
_lowerCAmelCase = self.target_lens[type_path]
_lowerCAmelCase = self.dataset_class(
self.tokenizer , type_path=_snake_case , n_obs=_snake_case , max_target_length=_snake_case , **self.dataset_kwargs , )
return dataset
def snake_case ( self , _snake_case , _snake_case , _snake_case = False ):
"""simple docstring"""
_lowerCAmelCase = self.get_dataset(_snake_case )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_lowerCAmelCase = dataset.make_sortish_sampler(_snake_case , distributed=self.hparams.gpus > 1 )
return DataLoader(
_snake_case , batch_size=_snake_case , collate_fn=dataset.collate_fn , shuffle=_snake_case , num_workers=self.num_workers , sampler=_snake_case , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_lowerCAmelCase = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_snake_case , batch_sampler=_snake_case , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_snake_case , batch_size=_snake_case , collate_fn=dataset.collate_fn , shuffle=_snake_case , num_workers=self.num_workers , sampler=_snake_case , )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=_snake_case )
return dataloader
def snake_case ( self ):
"""simple docstring"""
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def snake_case ( self ):
"""simple docstring"""
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None):
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
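# Example invocation sketch (hedged; generic flags such as --model_name_or_path
# are assumed to be registered by lightning_base.add_generic_args):
#
#   python finetune.py --data_dir ./cnn_dm --output_dir ./out \
#       --model_name_or_path t5-small --do_train --do_predict \
#       --task summarization --n_val 500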
| 82 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
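# Behavioural sketch (hedged): with the _LazyModule registration above, the
# package import stays cheap and heavy submodules load on first access:
#
#   from transformers.models.chinese_clip import ChineseCLIPProcessor  # light
#   from transformers.models.chinese_clip import ChineseCLIPModel  # imports torch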
| 365 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
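# Usage sketch (hedged): this builder backs the packaged "parquet" loader, e.g.
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "path/to/train.parquet"})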
| 331 | 0 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 37 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(self, vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
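# Minimal instantiation sketch (hedged; values are illustrative only):
#
#   vision = GitVisionConfig(image_size=224)
#   config = GitConfig(vision_config=vision.to_dict(), num_hidden_layers=6)
#   config.vision_config.image_size  # -> 224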
| 259 | 0 |
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f'''{solution() = }''')
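# Hedged sanity check from the Project Euler 7 statement: the 6th prime is 13.
if __name__ == "__main__":
    assert solution(6) == 13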
| 354 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=32_000, d_model=1_024, n_layer=24, n_head=16, d_inner=4_096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 269 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase : List[str] = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = PILImageResampling.BILINEAR , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_5_5 , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
a__ : List[str] =size if size is not None else {"shortest_edge": 2_2_4}
a__ : Union[str, Any] =get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
a__ : int =crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
a__ : Any =get_size_dict(lowerCAmelCase__ , param_name="crop_size" )
a__ : Any =do_resize
a__ : Optional[int] =size
a__ : Tuple =do_center_crop
a__ : Optional[int] =crop_size
a__ : Optional[int] =resample
a__ : str =do_rescale
a__ : str =rescale_factor
a__ : Dict =do_normalize
a__ : Union[str, Any] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ : str =image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = PILImageResampling.BILINEAR , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray:
'''simple docstring'''
a__ : List[Any] =get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" in size:
a__ : int =get_resize_output_image_size(lowerCAmelCase__ , size["shortest_edge"] , default_to_square=lowerCAmelCase__ )
elif "height" in size and "width" in size:
a__ : int =(size["height"], size["width"])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray:
'''simple docstring'''
a__ : Union[str, Any] =get_size_dict(lowerCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCAmelCase__ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> int:
'''simple docstring'''
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
a__ : List[Any] =to_numpy_array(lowerCAmelCase__ )
if do_resize:
a__ : int =self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ )
if do_center_crop:
a__ : Union[str, Any] =self.center_crop(lowerCAmelCase__ , size=lowerCAmelCase__ )
if do_rescale:
a__ : List[Any] =self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ )
if do_normalize:
a__ : Dict =self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ )
a__ : int =to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ )
return image
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , **lowerCAmelCase__ , ) -> PIL.Image.Image:
'''simple docstring'''
a__ : int =do_resize if do_resize is not None else self.do_resize
a__ : List[str] =resample if resample is not None else self.resample
a__ : List[Any] =do_center_crop if do_center_crop is not None else self.do_center_crop
a__ : int =do_rescale if do_rescale is not None else self.do_rescale
a__ : Union[str, Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
a__ : Dict =do_normalize if do_normalize is not None else self.do_normalize
a__ : Tuple =image_mean if image_mean is not None else self.image_mean
a__ : Tuple =image_std if image_std is not None else self.image_std
a__ : Tuple =size if size is not None else self.size
a__ : str =get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
a__ : Any =crop_size if crop_size is not None else self.crop_size
a__ : Dict =get_size_dict(lowerCAmelCase__ , param_name="crop_size" )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
a__ : List[Any] =make_batched(lowerCAmelCase__ )
a__ : str =[
[
self._preprocess_image(
image=lowerCAmelCase__ , do_resize=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , do_center_crop=lowerCAmelCase__ , crop_size=lowerCAmelCase__ , do_rescale=lowerCAmelCase__ , rescale_factor=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , image_mean=lowerCAmelCase__ , image_std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , )
for img in video
]
for video in videos
]
a__ : Tuple ={"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
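    # --- Usage sketch (illustrative) ----------------------------------------
    # A minimal sketch of how the `preprocess` pipeline above is typically
    # driven. The concrete processor class and its construction are
    # assumptions for illustration; the pattern (a list of frames in, a
    # `BatchFeature` with `pixel_values` out) is what `preprocess` implements.
    #
    #   import numpy as np
    #   processor = VideoMAEImageProcessor(size={"shortest_edge": 224})  # hypothetical concrete class
    #   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
    #   batch = processor.preprocess(video, return_tensors="np")
    #   batch["pixel_values"].shape  # e.g. (1, 16, 3, 224, 224)
    # --------------------------------------------------------------------------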
| 95 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
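    # Usage sketch (illustrative): the defaults above give the base classical
    # super-resolution configuration; `upscale` selects the SR factor at
    # construction time.
    #
    #   config = Swin2SRConfig(upscale=4)
    #   assert config.num_layers == len(config.depths) == 6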
| 95 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Any = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
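# Note (illustrative): with the `_LazyModule` registered above, the heavy
# tokenizer modules are only imported on first attribute access, e.g.
#
#   from transformers.models.nllb import NllbTokenizer  # triggers the lazy import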
| 361 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    """Configuration class for XLNet models."""

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 259 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1_024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Prefix every sequence with the language token of the current target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token) -> Optional[int]:
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
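# Usage sketch (illustrative; the file paths are placeholders): the tokenizer
# is assembled from the JSON vocabulary and SentencePiece model handled above.
#
#   tokenizer = Speech2TextTokenizer(
#       vocab_file="vocab.json",
#       spm_file="sentencepiece.bpe.model",
#       lang_codes="mustc",
#   )
#   tokenizer.tgt_lang = "fr"  # routed through the @tgt_lang.setter above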
| 4 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
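    # Usage sketch (illustrative): when no `backbone_config` is given, the
    # default ResNet backbone described above is instantiated automatically.
    #
    #   config = DetaConfig(num_queries=300)
    #   config.backbone_config.model_type   # "resnet"
    #   config.num_attention_heads          # 8, via the property above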
| 47 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.")

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
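# Example call (illustrative; the directory is a placeholder for a folder of
# downloaded CI artifacts):
#
#   warnings_found = extract_warnings("./artifacts", ["DeprecationWarning", "UserWarning"])
#   print(sorted(warnings_found))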
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 370 |
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Processor wrapping a SpeechT5 feature extractor and tokenizer into one API."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
| 235 | 0 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]
        inputs["prompt_embeds"] = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
| 73 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Find the area of the grid whose axis-aligned rectangle count is closest
    to `target`. An a x b grid contains T(a) * T(b) rectangles, where
    T(n) = n * (n + 1) / 2 is the n-th triangle number.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
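# Worked check (illustrative): a 3 x 2 grid contains T(3) * T(2) = 6 * 3 = 18
# rectangles, so solution(18) returns the exact-match area 3 * 2 = 6. For the
# default target of 2,000,000 the nearest count is T(36) * T(77) =
# 666 * 3003 = 1,999,998, i.e. a 36 x 77 grid with area 2772 (the well-known
# Project Euler #85 answer).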
if __name__ == "__main__":
    print(f"{solution() = }")
| 331 | 0 |
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list ):
_enforce_args(snake_case__ , snake_case__ )
if n == 0:
return 0
lowercase_ : Dict = float('-inf' )
for i in range(1 , n + 1 ):
lowercase_ : List[str] = max(
snake_case__ , prices[i - 1] + naive_cut_rod_recursive(n - i , snake_case__ ) )
return max_revue
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list ):
_enforce_args(snake_case__ , snake_case__ )
lowercase_ : Union[str, Any] = [float('-inf' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(snake_case__ , snake_case__ , snake_case__ )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowercase_ : Union[str, Any] = float('-inf' )
for i in range(1 , n + 1 ):
lowercase_ : int = max(
snake_case__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , snake_case__ , snake_case__ ) , )
lowercase_ : Optional[int] = max_revenue
return max_rev[n]
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list ):
_enforce_args(snake_case__ , snake_case__ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
lowercase_ : Dict = [float('-inf' ) for _ in range(n + 1 )]
lowercase_ : Optional[int] = 0
for i in range(1 , n + 1 ):
lowercase_ : Any = max_rev[i]
for j in range(1 , i + 1 ):
lowercase_ : int = max(snake_case__ , prices[j - 1] + max_rev[i - j] )
lowercase_ : Optional[Any] = max_revenue_i
return max_rev[n]
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list ):
if n < 0:
lowercase_ : Dict = F'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(snake_case__ )
if n > len(snake_case__ ):
lowercase_ : Optional[int] = (
'Each integral piece of rod must have a corresponding price. '
F'''Got n = {n} but length of prices = {len(snake_case__ )}'''
)
raise ValueError(snake_case__ )
def lowercase__( ):
lowercase_ : List[Any] = [6, 10, 12, 15, 20, 23]
lowercase_ : Dict = len(snake_case__ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
lowercase_ : Tuple = 36
lowercase_ : List[str] = top_down_cut_rod(snake_case__ , snake_case__ )
lowercase_ : Any = bottom_up_cut_rod(snake_case__ , snake_case__ )
lowercase_ : int = naive_cut_rod_recursive(snake_case__ , snake_case__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
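# Worked example (illustrative), using the classic CLRS price table: for
# prices = [1, 5, 8, 9, 10, 17, 17, 20] and a rod of length 8, all three
# implementations return 22 (cut into lengths 2 and 6: 5 + 17 = 22), e.g.:
#
#   assert bottom_up_cut_rod(8, [1, 5, 8, 9, 10, 17, 17, 20]) == 22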
| 365 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        *args,
    ):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 5_0358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 321 | 0 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random (optionally directed) graph, adding each possible edge
    independently with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add the reverse edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete undirected graph with `vertices_number` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
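    # Quick sanity checks (added for illustration; not part of the original module).
    # The seed is an arbitrary assumption, used only to make the demo deterministic.
    random.seed(0 )
    demo = random_graph(4 , 0.5 )
    assert set(demo ) == {0, 1, 2, 3}  # one adjacency list per vertex
    assert complete_graph(3 ) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}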
| 19 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : Optional[Any] = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
__snake_case : Any = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
__snake_case : str = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'whisper'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=5_1865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1536 , encoder_ffn_dim=1536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=5_0257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1500 , max_target_positions=448 , pad_token_id=5_0256 , bos_token_id=5_0256 , eos_token_id=5_0256 , suppress_tokens=None , begin_suppress_tokens=[220, 5_0256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ):
"""simple docstring"""
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
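# Minimal usage sketch (added for illustration; not part of the original file).
# The keyword names mirror the __init__ signature above; the small sizes are
# arbitrary assumptions chosen only for the demo.
def _demo_whisper_config():
    config = WhisperConfig(d_model=128 , encoder_layers=2 , decoder_layers=2 )
    # attribute_map aliases hidden_size to d_model
    assert config.hidden_size == 128
    return config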
class WhisperOnnxConfig( OnnxSeqaSeqConfigWithPast ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ])
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs")
        return common_inputs
    def generate_dummy_inputs( self , preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , sampling_rate: int = 2_2050 , time_duration: float = 5.0 , frequency: int = 220 , ) -> Mapping[str, Any]:
        """simple docstring"""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework)
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs
    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1e-3
 | 269 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCAmelCase__ : Optional[int] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def test_inference_image_variations( self ):
        """simple docstring"""
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 37 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''fnet'''
    def __init__( self , vocab_size=32_000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3_072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
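# Minimal usage sketch (added for illustration; not part of the original file).
# Keyword names mirror the __init__ above; the tiny sizes are arbitrary choices.
if __name__ == "__main__":
    tiny_config = FNetConfig(hidden_size=64 , num_hidden_layers=2 , intermediate_size=128 )
    assert tiny_config.model_type == "fnet"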
| 37 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester :
    """simple docstring"""
    def __init__( self ,parent ,batch_size=3 ,image_size=32 ,num_channels=3 ,embeddings_size=10 ,hidden_sizes=[10, 20, 30, 40] ,depths=[1, 1, 2, 1] ,is_training=True ,use_labels=True ,hidden_act="relu" ,num_labels=3 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ResNetConfig(
            num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
    def create_and_check_for_image_classification( self ,config ,pixel_values ,labels ):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
snake_case__ = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
    def setUp( self ):
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=ResNetConfig ,has_text_modality=False )
def __lowerCAmelCase ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self : List[str] ):
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCAmelCase ( self : Optional[int] ):
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCAmelCase ( self : Tuple ):
pass
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE_ )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __lowerCAmelCase ( self : Optional[int] ):
def check_hidden_states_output(lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase__ = layer_type
UpperCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def __lowerCAmelCase ( self : str ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = TFResNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def a_ ( ):
UpperCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowerCAmelCase ( self : Union[str, Any] ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=SCREAMING_SNAKE_CASE_ ,return_tensors='tf' )
# forward pass
UpperCAmelCase__ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCAmelCase__ = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,SCREAMING_SNAKE_CASE_ ,atol=1e-4 ) )
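# Helper sketch (added for illustration; not part of the original test): turning
# the verified (1, 1000) logits into an ImageNet class id. Assumes tf was imported
# above via is_tf_available().
def _predicted_class_id( logits ):
    return int(tf.argmax(logits ,axis=-1 )[0] )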
| 98 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] =ViTImageProcessor if is_vision_available() else None
@property
def UpperCAmelCase ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Union[str, Any] = (3, 32, 128)
UpperCamelCase :Any = tempfile.mkdtemp()
# fmt: off
UpperCamelCase :int = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
UpperCamelCase :Optional[int] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
UpperCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
UpperCamelCase :Tuple = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
UpperCamelCase :str = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> int:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :Dict = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
UpperCamelCase :List[Any] = Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) )
return image_input
def UpperCAmelCase ( self ) -> str:
UpperCamelCase :str = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = self.get_image_processor()
UpperCamelCase :List[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Dict = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Optional[int] = self.get_tokenizer()
UpperCamelCase :Dict = self.get_image_processor()
UpperCamelCase :List[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase :Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase :Optional[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
UpperCamelCase :int = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :Tuple = self.get_image_processor()
UpperCamelCase :List[str] = self.get_tokenizer()
UpperCamelCase :str = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[str] = self.prepare_image_inputs()
UpperCamelCase :List[str] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
UpperCamelCase :Optional[Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase ( self ) -> Any:
UpperCamelCase :Optional[Any] = self.get_image_processor()
UpperCamelCase :Union[str, Any] = self.get_tokenizer()
UpperCamelCase :int = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = '''test'''
UpperCamelCase :Optional[int] = processor(text=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase :List[str] = self.get_image_processor()
UpperCamelCase :Tuple = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = '''test'''
UpperCamelCase :str = self.prepare_image_inputs()
UpperCamelCase :Dict = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase :Optional[Any] = self.get_image_processor()
UpperCamelCase :Any = self.get_tokenizer()
UpperCamelCase :Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase :Union[str, Any] = processor.char_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase :List[Any] = self.get_image_processor()
UpperCamelCase :Optional[Any] = self.get_tokenizer()
UpperCamelCase :Any = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :List[Any] = None
UpperCamelCase :List[Any] = self.prepare_image_inputs()
UpperCamelCase :Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase ( self ) -> Dict:
UpperCamelCase :str = self.get_image_processor()
UpperCamelCase :Tuple = self.get_tokenizer()
UpperCamelCase :Optional[int] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :str = torch.randn(1 , 27 , 38 )
UpperCamelCase :Union[str, Any] = torch.randn(1 , 27 , 5_0257 )
UpperCamelCase :Optional[Any] = torch.randn(1 , 27 , 3_0522 )
UpperCamelCase :Optional[Any] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 259 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int]=13 , __UpperCAmelCase : int=3 , __UpperCAmelCase : int=224 , __UpperCAmelCase : Optional[Any]=30 , __UpperCAmelCase : Dict=400 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=None , __UpperCAmelCase : Any=True , __UpperCAmelCase : List[str]=[0.5, 0.5, 0.5] , __UpperCAmelCase : Tuple=[0.5, 0.5, 0.5] , ) ->Dict:
"""simple docstring"""
a = size if size is not None else {'''height''': 18, '''width''': 18}
a = parent
a = batch_size
a = num_channels
a = image_size
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_normalize
a = image_mean
a = image_std
def __lowerCAmelCase ( self : Dict ) ->List[Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
__snake_case = ViTImageProcessor if is_vision_available() else None
    def setUp( self ) ->None:
        """simple docstring"""
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processor , '''image_std''' ) )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processor , '''size''' ) )
def __lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Optional[Any] ) ->int:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_proc_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
a = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
a = image_processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def __lowerCAmelCase ( self : Tuple ) ->List[Any]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_proc_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
a = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
a = image_processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_proc_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
a = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
a = image_processor(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
| 26 |
def gnome_sort( lst : list ) -> list:
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
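    # quick sanity checks (added; illustrative)
    assert gnome_sort([3, 1, 2] ) == [1, 2, 3]
    assert gnome_sort([] ) == []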
| 26 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def A ( _lowerCamelCase="" ):
'''simple docstring'''
_lowerCAmelCase : Tuple = tempfile.mkdtemp()
return os.path.join(_lowerCamelCase , str(uuid.uuida() ) + suffix )
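# Illustrative note (added; not in the original file): every call produces a
# unique path inside a fresh temporary directory, so parallel tests never collide,
# e.g. get_new_path(".wav") -> "/tmp/tmpXXXXXXXX/<uuid>.wav" (path shape assumed).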
@require_soundfile
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.rand(12, dtype=torch.floataa) - 0.5
_lowerCAmelCase : List[str] = AgentAudio(__a)
_lowerCAmelCase : int = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__a, agent_type.to_raw(), atol=1E-4))
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(__a))
# Ensure that the file contains the same value as the original tensor
_lowerCAmelCase , _lowerCAmelCase : List[str] = sf.read(__a)
self.assertTrue(torch.allclose(__a, torch.tensor(__a), atol=1E-4))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = torch.rand(12, dtype=torch.floataa) - 0.5
_lowerCAmelCase : Optional[int] = get_new_path(suffix=".wav")
sf.write(__a, __a, 1_6000)
_lowerCAmelCase : Any = AgentAudio(__a)
self.assertTrue(torch.allclose(__a, agent_type.to_raw(), atol=1E-4))
self.assertEqual(agent_type.to_string(), __a)
@require_vision
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = torch.randint(0, 256, (64, 64, 3))
_lowerCAmelCase : str = AgentImage(__a)
_lowerCAmelCase : Optional[Any] = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__a, agent_type._tensor, atol=1E-4))
self.assertIsInstance(agent_type.to_raw(), Image.Image)
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
_lowerCAmelCase : Tuple = Image.open(__a)
_lowerCAmelCase : Optional[Any] = AgentImage(__a)
self.assertTrue(path.samefile(agent_type.to_string()))
self.assertTrue(image == agent_type.to_raw())
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
_lowerCAmelCase : Optional[Any] = Image.open(__a)
_lowerCAmelCase : Any = AgentImage(__a)
self.assertFalse(path.samefile(agent_type.to_string()))
self.assertTrue(image == agent_type.to_raw())
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__a))
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = "Hey!"
_lowerCAmelCase : Any = AgentText(__a)
self.assertEqual(__a, agent_type.to_string())
self.assertEqual(__a, agent_type.to_raw())
self.assertEqual(__a, __a)
| 36 |
def combination_sum_iv( n : int ,array : list[int] ,target : int ) -> int:
    """simple docstring"""
    def count_of_possible_combinations( target : int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n : int ,array : list[int] ,target : int ) -> int:
    """simple docstring"""
    def count_of_possible_combinations_with_dp_array(
        target : int ,dp_array : list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item ,dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target ,dp_array )
def combination_sum_iv_bottom_up( n : int ,array : list[int] ,target : int ) -> int:
    """simple docstring"""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 ,target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
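    # all three implementations should agree on this instance (added; illustrative):
    # ordered sequences over [1, 2, 5] that sum to 5 number exactly 9
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )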
| 235 | 0 |
class FlowNetwork :
    def __init__( self , graph , sources , sinks ):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources , sinks )
        self.vertices_count = len(graph )
        self.maximum_flow_algorithm = None
    def _normalize_graph( self , sources , sinks ):
        if isinstance(sources , int ):
            sources = [sources]
        if isinstance(sinks , int ):
            sinks = [sinks]
        if len(sources ) == 0 or len(sinks ) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources ) > 1 or len(sinks ) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            size = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow( self ):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before." )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()
    def set_maximum_flow_algorithm( self , algorithm ):
        self.maximum_flow_algorithm = algorithm(self )
class FlowNetworkAlgorithmExecutor :
    def __init__( self , flow_network ):
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute( self ):
        if not self.executed:
            self._algorithm()
            self.executed = True
    def _algorithm( self ):
        pass
class MaximumFlowAlgorithmExecutor ( FlowNetworkAlgorithmExecutor ):
    def __init__( self , flow_network ):
        super().__init__(flow_network )
        # use this to save your result
        self.maximum_flow = -1
    def get_maximum_flow( self ):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!" )
        return self.maximum_flow
class PushRelabelExecutor ( MaximumFlowAlgorithmExecutor ):
    def __init__( self , flow_network ):
        super().__init__(flow_network )
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm( self ):
        self.heights[self.source_index] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
# move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(i ) )
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index] )
    def process_vertex( self , vertex_index ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
                    self.push(vertex_index , neighbour_index )
            self.relabel(vertex_index )
    def push( self , from_index , to_index ):
        preflow_delta = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
    def relabel( self , vertex_index ):
        min_height = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
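    # the only augmenting path in this sample network is 0 -> 1 -> 2 -> 3, so the
    # bottleneck capacity of 6 is the expected result (added; illustrative)
    assert maximum_flow == 6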
| 358 |
'''simple docstring'''
def catalan_number( number : int ) -> int:
    if not isinstance(number , int ):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 1:
        msg = F"Input value of [number={number}] must be > 0"
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
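    # spot check (added; illustrative): catalan_number(n) is the (n-1)-th Catalan number
    assert [catalan_number(n ) for n in range(1 , 6 )] == [1, 1, 2, 5, 14]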
| 123 | 0 |
def odd_even_transposition( arr : list ) -> list:
    """simple docstring"""
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 95 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
SCREAMING_SNAKE_CASE__ = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
SCREAMING_SNAKE_CASE__ = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
SCREAMING_SNAKE_CASE__ = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 321 | 0 |
'''simple docstring'''
import math
def is_prime( number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are in the format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution( ratio : float = 0.1 ) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
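    # spot check (added; illustrative): primality of a few small values
    assert [is_prime(n ) for n in (2, 3, 4, 5, 9 )] == [True, True, False, True, False]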
| 364 |
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class NezhaConfig( PretrainedConfig ):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = """nezha"""
    def __init__( self , vocab_size=21128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ):
'''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
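# Minimal usage sketch (added for illustration; not part of the original file).
# Keyword names mirror the __init__ above; the small sizes are arbitrary choices.
if __name__ == "__main__":
    tiny_config = NezhaConfig(hidden_size=128 , num_hidden_layers=2 , num_attention_heads=2 )
    assert tiny_config.model_type == "nezha"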
| 251 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Optional[int] = '''pix2struct_text_model'''
__lowercase : str = ['''past_key_values''']
__lowercase : Dict = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self ,__UpperCAmelCase=5_0244 ,__UpperCAmelCase=768 ,__UpperCAmelCase=64 ,__UpperCAmelCase=2048 ,__UpperCAmelCase=12 ,__UpperCAmelCase=12 ,__UpperCAmelCase=32 ,__UpperCAmelCase=128 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=1E-6 ,__UpperCAmelCase=1.0 ,__UpperCAmelCase="gelu_new" ,__UpperCAmelCase=0 ,__UpperCAmelCase=False ,__UpperCAmelCase=0 ,__UpperCAmelCase=1 ,__UpperCAmelCase=False ,__UpperCAmelCase=True ,**__UpperCAmelCase ,) -> Tuple:
lowerCAmelCase__ : Optional[Any] = vocab_size
lowerCAmelCase__ : List[Any] = hidden_size
lowerCAmelCase__ : List[Any] = d_kv
lowerCAmelCase__ : Optional[Any] = d_ff
lowerCAmelCase__ : Optional[int] = num_layers
lowerCAmelCase__ : Optional[Any] = num_heads
lowerCAmelCase__ : str = relative_attention_num_buckets
lowerCAmelCase__ : Dict = relative_attention_max_distance
lowerCAmelCase__ : Optional[Any] = dropout_rate
lowerCAmelCase__ : Optional[int] = layer_norm_epsilon
lowerCAmelCase__ : List[str] = initializer_factor
lowerCAmelCase__ : Dict = use_cache
lowerCAmelCase__ : int = eos_token_id
lowerCAmelCase__ : Optional[int] = decoder_start_token_id
# for backwards compatibility
lowerCAmelCase__ : Optional[int] = dense_act_fn
super().__init__(
pad_token_id=__UpperCAmelCase ,eos_token_id=__UpperCAmelCase ,decoder_start_token_id=__UpperCAmelCase ,tie_word_embeddings=__UpperCAmelCase ,is_decoder=__UpperCAmelCase ,**__UpperCAmelCase ,)
@classmethod
def UpperCAmelCase_ ( cls ,__UpperCAmelCase ,**__UpperCAmelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Dict = cls.get_config_dict(__UpperCAmelCase ,**__UpperCAmelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
lowerCAmelCase__ : List[str] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCAmelCase ,**__UpperCAmelCase )
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Tuple = '''pix2struct_vision_model'''
def __init__( self ,__UpperCAmelCase=768 ,__UpperCAmelCase=768 ,__UpperCAmelCase=2048 ,__UpperCAmelCase=64 ,__UpperCAmelCase=12 ,__UpperCAmelCase=12 ,__UpperCAmelCase="gelu_new" ,__UpperCAmelCase=1E-6 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=1E-10 ,__UpperCAmelCase=1.0 ,__UpperCAmelCase=4096 ,__UpperCAmelCase=32 ,__UpperCAmelCase=128 ,**__UpperCAmelCase ,) -> Union[str, Any]:
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : Dict = patch_embed_hidden_size
lowerCAmelCase__ : List[Any] = d_ff
lowerCAmelCase__ : Dict = dropout_rate
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : List[str] = num_attention_heads
lowerCAmelCase__ : Optional[Any] = initializer_range
lowerCAmelCase__ : List[Any] = initializer_factor
lowerCAmelCase__ : Dict = attention_dropout
lowerCAmelCase__ : Any = layer_norm_eps
lowerCAmelCase__ : str = dense_act_fn
lowerCAmelCase__ : Tuple = seq_len
lowerCAmelCase__ : List[Any] = relative_attention_num_buckets
lowerCAmelCase__ : Any = relative_attention_max_distance
lowerCAmelCase__ : List[Any] = d_kv
@classmethod
def UpperCAmelCase_ ( cls ,__UpperCAmelCase ,**__UpperCAmelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Any = cls.get_config_dict(__UpperCAmelCase ,**__UpperCAmelCase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
lowerCAmelCase__ : Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCAmelCase ,**__UpperCAmelCase )
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: "Pix2StructTextConfig", vision_config: "Pix2StructVisionConfig", **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
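# Added illustrative sketch: composing a full Pix2StructConfig from the two
# sub-configs and round-tripping it through `to_dict`. This assumes the text
# sub-config defined earlier in this file is exposed as Pix2StructTextConfig.
def _demo_compose_pix2struct_config():
    text_config = Pix2StructTextConfig()
    vision_config = Pix2StructVisionConfig()
    config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config, is_vqa=False)
    assert config.to_dict()["model_type"] == "pix2struct"
    return config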
| 37 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
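# Added illustrative usage sketch: driving the pipeline through the standard
# `pipeline` factory. The CLIP checkpoint and the image URL are assumptions
# for demonstration; any zero-shot image classification checkpoint works.
def _demo_zero_shot_image_classification():
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    return classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["a photo of a cat", "a photo of a dog"],
    )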
| 37 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
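# Added illustrative sketch (not in the original file): a quick shape check of
# the conv layer above. With kernel_size=3 and stride=2, ZeroPadding2D(1) plus
# a VALID conv halves the spatial dims: (1, 224, 224, 3) -> (1, 112, 112, 32).
def _demo_conv_layer_shapes():
    layer = TFRegNetConvLayer(out_channels=32, kernel_size=3, stride=2, name="demo_conv")
    dummy = tf.random.uniform((1, 224, 224, 3))  # NHWC, as the layer expects
    return layer(dummy).shape  # TensorShape([1, 112, 112, 32])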
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_states = self.embedder(pixel_values)
        return hidden_states
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training=False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # squeeze to [batch_size, 1, 1, num_channels], excite, then rescale the input
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
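# Added illustrative sketch: the squeeze-and-excitation idea implemented above,
# reduced to plain TF ops. A global-average "squeeze" yields one value per
# channel; a gate in (0, 1) then rescales each channel of the input. The bare
# sigmoid here stands in for the two learned 1x1 convs of the real layer.
def _demo_squeeze_excitation():
    feature_map = tf.random.uniform((1, 7, 7, 64))  # NHWC feature map
    squeezed = tf.reduce_mean(feature_map, axis=(1, 2), keepdims=True)  # (1, 1, 1, 64)
    gate = tf.sigmoid(squeezed)  # per-channel gate; the real layer learns this
    return feature_map * gate  # channel-wise reweighting, same shape as input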
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state, output_hidden_states=False, return_dict=True) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
snake_case_ = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
snake_case_ = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
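# Added illustrative usage sketch: end-to-end image classification with the
# checkpoint named in the docstring constants above ("facebook/regnet-y-040").
# The COCO image URL is an assumption used purely for demonstration.
def _demo_regnet_image_classification():
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = processor(images=image, return_tensors="tf")
    logits = model(**inputs).logits
    return model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])]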
| 359 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
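# Added illustrative usage sketch: this formatter is what backs
# `Dataset.with_format("torch")` in `datasets`. Integer columns come back as
# torch.int64 tensors (see the default dtype logic in `_tensorize` above), and
# equally-shaped rows are stacked by `_consolidate`.
def _demo_torch_format():
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
    batch = ds[:2]
    return batch["x"].shape, batch["x"].dtype  # torch.Size([2, 2]), torch.int64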
| 238 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
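# Added illustrative sketch: instantiating the config with its defaults and
# reading the ONNX export metadata defined above. Values shown are the library
# defaults; nothing here touches the network itself.
def _demo_poolformer_config():
    config = PoolFormerConfig(depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512])
    onnx_config = PoolFormerOnnxConfig(config)
    assert "pixel_values" in onnx_config.inputs  # NCHW input with a dynamic batch axis
    return onnx_config.atol_for_validation  # 2e-3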
| 26 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one file, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
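# Added illustrative launch command (kept as comments, since invocation is
# shell): assuming this file is saved as run_distributed_eval.py, a two-GPU
# run could look like the following; paths, checkpoint, and GPU count are
# placeholders.
#
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir xsum --save_dir tmp_gen --bs 16 --fp16
#
# Each rank writes rank_<i>_output.json into <save_dir>_tmp; rank 0 then
# gathers, merges, scores, and writes <type_path>_generations.txt.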
| 26 | 1 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 370 | from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
def __init__(self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str]=1_3 , __UpperCAmelCase : Optional[Any]=7 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : int=True , __UpperCAmelCase : Any=True , __UpperCAmelCase : int=True , __UpperCAmelCase : Dict=9_9 , __UpperCAmelCase : Optional[int]=3_2 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Any=4 , __UpperCAmelCase : Tuple=3_7 , __UpperCAmelCase : List[Any]="gelu" , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : str=5_1_2 , __UpperCAmelCase : Union[str, Any]=1_6 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : Optional[int]=3 , __UpperCAmelCase : str=4 , __UpperCAmelCase : Any=None , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = 1_3
UpperCAmelCase__ = 7
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = 9_9
UpperCAmelCase__ = 3_2
UpperCAmelCase__ = 2
UpperCAmelCase__ = 4
UpperCAmelCase__ = 3_7
UpperCAmelCase__ = "gelu"
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 5_1_2
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 2
UpperCAmelCase__ = 0.02
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = None
def lowercase_ (self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
if self.use_token_type_ids:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ (self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = TFRoFormerModel(config=__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase__ = [input_ids, input_mask]
UpperCAmelCase__ = model(__UpperCAmelCase )
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ (self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = True
UpperCAmelCase__ = TFRoFormerForCausalLM(config=__UpperCAmelCase )
UpperCAmelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ = model(__UpperCAmelCase )["logits"]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def lowercase_ (self : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = TFRoFormerForMaskedLM(config=__UpperCAmelCase )
UpperCAmelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFRoFormerForSequenceClassification(config=__UpperCAmelCase )
UpperCAmelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ (self : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = TFRoFormerForMultipleChoice(config=__UpperCAmelCase )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ (self : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFRoFormerForTokenClassification(config=__UpperCAmelCase )
UpperCAmelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ (self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase )
UpperCAmelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ (self : Dict ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) = config_and_inputs
UpperCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
def lowercase_ (self : Optional[int] ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ (self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowercase_ (self : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowercase_ (self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__UpperCAmelCase )
def lowercase_ (self : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def lowercase_ (self : List[Any] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def lowercase_ (self : int ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowercase_ (self : List[str] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@slow
def lowercase_ (self : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
@slow
def lowercase_ (self : List[Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = model(__UpperCAmelCase )[0]
# TODO Replace vocab size
UpperCAmelCase__ = 5_0_0_0_0
UpperCAmelCase__ = [1, 6, vocab_size]
self.assertEqual(output.shape , __UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
UpperCAmelCase__ = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
def lowercase_ (self : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = tf.constant([[4, 1_0]] )
UpperCAmelCase__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
UpperCAmelCase__ = emba(input_ids.shape )
UpperCAmelCase__ = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance )
def lowercase_ (self : List[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase__ = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
UpperCAmelCase__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 )
emba([2, 1_6, 5_1_2] )
UpperCAmelCase__ = emba.weight[:3, :5]
tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
def lowercase_ (self : Tuple ) -> int:
"""simple docstring"""
UpperCAmelCase__ = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
UpperCAmelCase__ = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
UpperCAmelCase__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 )
UpperCAmelCase__ = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
UpperCAmelCase__ , UpperCAmelCase__ = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
UpperCAmelCase__ = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
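# Added minimal NumPy sketch of the rotate-half rule the rotary test above
# verifies: for per-position sin/cos tables, RoFormer mixes adjacent feature
# pairs as x_even' = x_even*cos - x_odd*sin and x_odd' = x_odd*cos + x_even*sin.
def _demo_rotary_rotation(x, sin, cos):
    import numpy as np

    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    rotated = np.empty_like(x)
    rotated[..., 0::2] = x_even * cos - x_odd * sin
    rotated[..., 1::2] = x_odd * cos + x_even * sin
    return rotated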
| 143 | 0 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode, use_xla):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size, sequence_length, vocab_size):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> Callable[[], None]:
'''simple docstring'''
snake_case_ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
if self.args.fpaa:
raise NotImplementedError("Mixed precision is currently not supported." )
snake_case_ = (
hasattr(a__ , "architectures" )
and isinstance(config.architectures , a__ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
snake_case_ = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
snake_case_ = __import__("transformers" , fromlist=[model_class] )
snake_case_ = getattr(a__ , a__ )
snake_case_ = model_cls(a__ )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
" set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
else:
snake_case_ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a__ )
# encoder-decoder has vocab size saved differently
snake_case_ = config.vocab_size if hasattr(a__ , "vocab_size" ) else config.encoder.vocab_size
snake_case_ = random_input_ids(a__ , a__ , a__ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
snake_case_ = model(a__ , decoder_input_ids=a__ , labels=a__ , training=a__ )[0]
snake_case_ = tf.gradients(a__ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
snake_case_ = model(a__ , labels=a__ , training=a__ )[0]
snake_case_ = tf.gradients(a__ , model.trainable_variables )
return gradients
snake_case_ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCAmelCase__ ( self , a__ ) -> float:
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 5 times to stabilize compilation for tpu
logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(a__ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
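# Added note: system jitter can only slow a run down, never speed it up, so the
# minimum over repeats is the least-noisy estimate of intrinsic runtime.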
snake_case_ = timeit.repeat(
a__ , repeat=self.args.repeat , number=10 , )
return min(a__ ) / 1_0.0
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
def lowerCAmelCase__ ( self , a__ ) -> [Memory, MemorySummary]:
'''simple docstring'''
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
snake_case_ = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
snake_case_ = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
snake_case_ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
snake_case_ = nvml.nvmlDeviceGetMemoryInfo(a__ )
snake_case_ = meminfo.used
snake_case_ = Memory(a__ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
snake_case_ = None
else:
snake_case_ = measure_peak_memory_cpu(a__ )
snake_case_ = Memory(a__ ) if isinstance(a__ , a__ ) else memory_bytes
if self.args.trace_memory_line_by_line:
snake_case_ = stop_memory_tracing(a__ )
if memory is None:
snake_case_ = summary.total
else:
snake_case_ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
return "N/A", None
| 85 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : int = logging.get_logger(__name__)
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Any = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
__snake_case : Optional[int] = 1_0_2_4
__snake_case : List[Any] = 4_0_9_6
__snake_case : List[Any] = 2_4
__snake_case : Optional[Any] = 1_6
__snake_case : str = [5, 1_1, 1_7, 2_3]
__snake_case : List[str] = [2_5_6, 5_1_2, 1_0_2_4, 1_0_2_4]
__snake_case : Union[str, Any] = (1, 3_8_4, 3_8_4)
if "nyu" or "midas" in checkpoint_url:
__snake_case : Tuple = 7_6_8
__snake_case : Any = [1, 1, 1, 0.5]
__snake_case : Any = [2_5_6, 5_1_2, 7_6_8, 7_6_8]
__snake_case : Any = 1_5_0
__snake_case : Optional[Any] = 1_6
__snake_case : List[str] = (1, 3_8_4, 3_8_4)
__snake_case : Tuple = False
__snake_case : Optional[Any] = "project"
if "ade" in checkpoint_url:
__snake_case : Optional[int] = True
__snake_case : List[str] = 7_6_8
__snake_case : int = [1, 1, 1, 0.5]
__snake_case : Any = 1_5_0
__snake_case : Tuple = 1_6
__snake_case : List[str] = "huggingface/label-files"
__snake_case : Union[str, Any] = "ade20k-id2label.json"
__snake_case : List[str] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) ) , "r" ) )
__snake_case : int = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
__snake_case : Optional[Any] = idalabel
__snake_case : str = {v: k for k, v in idalabel.items()}
__snake_case : Tuple = [1, 1_5_0, 4_8_0, 4_8_0]
return config, expected_shape
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Tuple = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__snake_case : Tuple = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
__snake_case : Tuple = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
__snake_case : Optional[Any] = name.replace("patch_embed" , "" )
if "pos_embed" in name:
__snake_case : Optional[int] = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
__snake_case : List[str] = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
__snake_case : Union[str, Any] = name.replace("proj" , "projection" )
if "blocks" in name:
__snake_case : int = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
__snake_case : Tuple = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__snake_case : Any = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
__snake_case : Optional[Any] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
__snake_case : Any = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
__snake_case : Dict = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
__snake_case : Union[str, Any] = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
__snake_case : List[Any] = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
__snake_case : str = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
__snake_case : List[str] = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
__snake_case : Optional[int] = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
__snake_case : Optional[int] = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__snake_case : int = name.replace(F'refinenet{layer_idx}' , F'fusion_stage.layers.{abs(layer_idx-4 )}' )
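# Example (added): "neck.refinenet4..." gives layer_idx = 4, so abs(4 - 4) = 0 and the
# key becomes "neck.fusion_stage.layers.0..."; refinenet1 likewise maps to layers.3.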
if "out_conv" in name:
__snake_case : Any = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
__snake_case : List[Any] = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
__snake_case : Tuple = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
__snake_case : List[str] = name.replace("conv1" , "convolution1" )
if "conv2" in name:
__snake_case : str = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__snake_case : Union[str, Any] = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
__snake_case : Optional[int] = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
__snake_case : List[str] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
__snake_case : Dict = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__snake_case : Tuple = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
__snake_case : int = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
__snake_case : Union[str, Any] = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
__snake_case : Optional[Any] = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
__snake_case : Optional[int] = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
__snake_case : Dict = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
__snake_case : Union[str, Any] = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
__snake_case : Union[str, Any] = name.replace("pretrained" , "dpt" )
if "bn" in name:
__snake_case : Tuple = name.replace("bn" , "batch_norm" )
if "head" in name:
__snake_case : Dict = name.replace("head" , "head.head" )
if "encoder.norm" in name:
__snake_case : Optional[int] = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
__snake_case : Tuple = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
__snake_case : str = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
__snake_case : Tuple = name.replace(".." , "." )
if "stem.conv" in name:
__snake_case : int = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
__snake_case : Any = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
__snake_case : Optional[int] = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
__snake_case : List[Any] = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
__snake_case : Optional[int] = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
__snake_case : int = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
__snake_case : Optional[Any] = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__snake_case : int = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight' )
__snake_case : Any = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
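# Layout note (added): the fused qkv weight has shape (3 * hidden_size, hidden_size),
# stacked as [query; key; value] along dim 0; hence the row slices [:h], [h:2h] and
# [-h:] below, with h = config.hidden_size.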
__snake_case : str = in_proj_weight[: config.hidden_size, :]
__snake_case : List[Any] = in_proj_bias[: config.hidden_size]
__snake_case : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__snake_case : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__snake_case : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
__snake_case : int = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( ):
__snake_case : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
__snake_case : int = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case , __snake_case : Optional[int] = get_dpt_config(__lowerCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
__snake_case : Optional[int] = torch.load(__lowerCamelCase , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(__lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
__snake_case : Optional[int] = state_dict.pop(__lowerCamelCase )
__snake_case : Optional[Any] = val
# read in qkv matrices
read_in_q_k_v(__lowerCamelCase , __lowerCamelCase )
# load HuggingFace model
__snake_case : Dict = DPTForSemanticSegmentation(__lowerCamelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# Check outputs on an image
__snake_case : str = 4_8_0 if "ade" in checkpoint_url else 3_8_4
__snake_case : Any = DPTImageProcessor(size=__lowerCamelCase )
__snake_case : int = prepare_img()
__snake_case : Union[str, Any] = image_processor(__lowerCamelCase , return_tensors="pt" )
# forward pass
__snake_case : Dict = model(**__lowerCamelCase ).logits if "ade" in checkpoint_url else model(**__lowerCamelCase ).predicted_depth
if show_prediction:
__snake_case : int = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=__lowerCamelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_5_5 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
_snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
_snake_case : str = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 123 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ (__snake_case ):
__lowerCamelCase : jnp.ndarray
__lowerCamelCase : jnp.ndarray
class SCREAMING_SNAKE_CASE__ (nn.Module ):
__lowerCamelCase : int
__lowerCamelCase : Tuple[int] = (16, 32, 96, 256)
__lowerCamelCase : jnp.dtype = jnp.floataa
def snake_case_ ( self):
lowercase__ : str = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase__ : int = []
for i in range(len(self.block_out_channels) - 1):
lowercase__ : Optional[int] = self.block_out_channels[i]
lowercase__ : Optional[Any] = self.block_out_channels[i + 1]
lowercase__ : Tuple = nn.Conv(
a , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(a)
lowercase__ : List[str] = nn.Conv(
a , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(a)
lowercase__ : Tuple = blocks
lowercase__ : Tuple = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , a):
lowercase__ : Any = self.conv_in(a)
lowercase__ : Optional[int] = nn.silu(a)
for block in self.blocks:
lowercase__ : Dict = block(a)
lowercase__ : Optional[Any] = nn.silu(a)
lowercase__ : List[Any] = self.conv_out(a)
return embedding
@flax_register_to_config
class SCREAMING_SNAKE_CASE__ (nn.Module , __snake_case , __snake_case ):
__lowerCamelCase : int = 32
__lowerCamelCase : int = 4
__lowerCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__lowerCamelCase : Union[bool, Tuple[bool]] = False
__lowerCamelCase : Tuple[int] = (320, 640, 1280, 1280)
__lowerCamelCase : int = 2
__lowerCamelCase : Union[int, Tuple[int]] = 8
__lowerCamelCase : Optional[Union[int, Tuple[int]]] = None
__lowerCamelCase : int = 1280
__lowerCamelCase : float = 0.0
__lowerCamelCase : bool = False
__lowerCamelCase : jnp.dtype = jnp.floataa
__lowerCamelCase : bool = True
__lowerCamelCase : int = 0
__lowerCamelCase : str = "rgb"
__lowerCamelCase : Tuple[int] = (16, 32, 96, 256)
def snake_case_ ( self , a):
# init input tensors
lowercase__ : Dict = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase__ : str = jnp.zeros(a , dtype=jnp.floataa)
lowercase__ : List[str] = jnp.ones((1,) , dtype=jnp.intaa)
lowercase__ : Tuple = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa)
lowercase__ : Union[str, Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase__ : Dict = jnp.zeros(a , dtype=jnp.floataa)
lowercase__ : Dict = jax.random.split(a)
lowercase__ : Any = {'params': params_rng, 'dropout': dropout_rng}
return self.init(a , a , a , a , a)["params"]
def snake_case_ ( self):
lowercase__ : Dict = self.block_out_channels
lowercase__ : Optional[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase__ : Optional[Any] = self.num_attention_heads or self.attention_head_dim
# input
lowercase__ : Dict = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase__ : Dict = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift)
lowercase__ : Dict = FlaxTimestepEmbedding(a , dtype=self.dtype)
lowercase__ : Optional[int] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowercase__ : List[str] = self.only_cross_attention
if isinstance(a , a):
lowercase__ : Tuple = (only_cross_attention,) * len(self.down_block_types)
if isinstance(a , a):
lowercase__ : Any = (num_attention_heads,) * len(self.down_block_types)
# down
lowercase__ : Any = []
lowercase__ : Union[str, Any] = []
lowercase__ : Union[str, Any] = block_out_channels[0]
lowercase__ : Optional[int] = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a)
for i, down_block_type in enumerate(self.down_block_types):
lowercase__ : List[Any] = output_channel
lowercase__ : Any = block_out_channels[i]
lowercase__ : List[Any] = i == len(a) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase__ : Optional[int] = FlaxCrossAttnDownBlockaD(
in_channels=a , out_channels=a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowercase__ : str = FlaxDownBlockaD(
in_channels=a , out_channels=a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(a)
for _ in range(self.layers_per_block):
lowercase__ : Any = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a)
if not is_final_block:
lowercase__ : Dict = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(a)
lowercase__ : str = down_blocks
lowercase__ : Dict = controlnet_down_blocks
# mid
lowercase__ : Any = block_out_channels[-1]
lowercase__ : Dict = FlaxUNetMidBlockaDCrossAttn(
in_channels=a , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowercase__ : List[Any] = nn.Conv(
a , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , a , a , a , a , a = 1.0 , a = True , a = False , ):
lowercase__ : Optional[Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowercase__ : Dict = jnp.flip(a , axis=1)
# 1. time
if not isinstance(a , jnp.ndarray):
lowercase__ : Dict = jnp.array([timesteps] , dtype=jnp.intaa)
elif isinstance(a , jnp.ndarray) and len(timesteps.shape) == 0:
lowercase__ : Optional[Any] = timesteps.astype(dtype=jnp.floataa)
lowercase__ : Optional[int] = jnp.expand_dims(a , 0)
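# Added note: the two branches above promote Python scalars and 0-d arrays to a
# 1-element batch, so `time_proj` always receives a rank-1 array of shape (1,).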
lowercase__ : List[str] = self.time_proj(a)
lowercase__ : Union[str, Any] = self.time_embedding(a)
# 2. pre-process
lowercase__ : List[str] = jnp.transpose(a , (0, 2, 3, 1))
lowercase__ : int = self.conv_in(a)
lowercase__ : Dict = jnp.transpose(a , (0, 2, 3, 1))
lowercase__ : Union[str, Any] = self.controlnet_cond_embedding(a)
sample += controlnet_cond
# 3. down
lowercase__ : Tuple = (sample,)
for down_block in self.down_blocks:
if isinstance(a , a):
lowercase__ : int = down_block(a , a , a , deterministic=not train)
else:
lowercase__ : int = down_block(a , a , deterministic=not train)
down_block_res_samples += res_samples
# 4. mid
lowercase__ : Tuple = self.mid_block(a , a , a , deterministic=not train)
# 5. controlnet blocks
lowercase__ : List[str] = ()
for down_block_res_sample, controlnet_block in zip(a , self.controlnet_down_blocks):
lowercase__ : Optional[int] = controlnet_block(a)
controlnet_down_block_res_samples += (down_block_res_sample,)
lowercase__ : Tuple = controlnet_down_block_res_samples
lowercase__ : List[str] = self.controlnet_mid_block(a)
# 6. scaling
lowercase__ : List[str] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
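# Added note: `conditioning_scale` uniformly scales every ControlNet residual, letting
# callers fade the control signal in or out (0.0 disables the conditioning entirely).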
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=a , mid_block_res_sample=a)
| 355 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 216 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 259 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : int = '''openai-gpt'''
A : str = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self, A=40_478, A=512, A=768, A=12, A=12, A="gelu", A=0.1, A=0.1, A=0.1, A=1E-5, A=0.02, A="cls_index", A=True, A=None, A=True, A=0.1, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = n_positions
SCREAMING_SNAKE_CASE : List[str] = n_embd
SCREAMING_SNAKE_CASE : Optional[Any] = n_layer
SCREAMING_SNAKE_CASE : Optional[Any] = n_head
SCREAMING_SNAKE_CASE : str = afn
SCREAMING_SNAKE_CASE : List[str] = resid_pdrop
SCREAMING_SNAKE_CASE : int = embd_pdrop
SCREAMING_SNAKE_CASE : Optional[Any] = attn_pdrop
SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = summary_type
SCREAMING_SNAKE_CASE : Tuple = summary_use_proj
SCREAMING_SNAKE_CASE : Dict = summary_activation
SCREAMING_SNAKE_CASE : Tuple = summary_first_dropout
SCREAMING_SNAKE_CASE : List[str] = summary_proj_to_labels
super().__init__(**A )
| 251 | 0 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase( _a, unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Any = ort.SessionOptions()
_lowercase : List[str] = False
return options
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png')
_lowercase : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
_lowercase : Optional[int] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting', revision='onnx', safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : str = 'A red cat sitting on a park bench'
_lowercase : Union[str, Any] = np.random.RandomState(0)
_lowercase : str = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, mask_image=lowerCamelCase, guidance_scale=7.5, num_inference_steps=10, generator=lowerCamelCase, output_type='np', )
_lowercase : Optional[int] = output.images
_lowercase : Optional[Any] = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowercase : Optional[int] = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png')
_lowercase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
_lowercase : Any = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting', subfolder='scheduler', revision='onnx')
_lowercase : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting', revision='onnx', scheduler=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=lowerCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Optional[int] = 'A red cat sitting on a park bench'
_lowercase : List[Any] = np.random.RandomState(0)
_lowercase : Dict = pipe(
prompt=lowerCamelCase, image=lowerCamelCase, mask_image=lowerCamelCase, guidance_scale=7.5, num_inference_steps=20, generator=lowerCamelCase, output_type='np', )
_lowercase : int = output.images
_lowercase : str = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowercase : int = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
| 84 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE : Optional[int] = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 | 1 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase_ : Optional[Any] = """"""
UpperCAmelCase_ : int = """"""
UpperCAmelCase_ : List[str] = """"""
UpperCAmelCase_ : Any = """"""
def _A (__a ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tweepy.OAuthHandler(__a , __a )
auth.set_access_token(__a , __a )
SCREAMING_SNAKE_CASE_ : Dict = tweepy.API(__a )
# initialize a list to hold all the tweepy Tweets
SCREAMING_SNAKE_CASE_ : List[Any] = []
# make initial request for most recent tweets (200 is the maximum allowed count)
SCREAMING_SNAKE_CASE_ : int = api.user_timeline(screen_name=__a , count=2_00 )
# save most recent tweets
alltweets.extend(__a )
# save the id of the oldest tweet less one
SCREAMING_SNAKE_CASE_ : Optional[int] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(__a ) > 0:
print(f'getting tweets before {oldest}' )
# all subsequent requests use the max_id param to prevent duplicates
SCREAMING_SNAKE_CASE_ : Dict = api.user_timeline(
screen_name=__a , count=2_00 , max_id=__a )
# save most recent tweets
alltweets.extend(__a )
# update the id of the oldest tweet less one
SCREAMING_SNAKE_CASE_ : List[str] = alltweets[-1].id - 1
print(f'...{len(__a )} tweets downloaded so far' )
# transform the tweepy tweets into a 2D array that will populate the csv
SCREAMING_SNAKE_CASE_ : List[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'new_{screen_name}_tweets.csv' , '''w''' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = csv.writer(__a )
writer.writerow(['''id''', '''created_at''', '''text'''] )
writer.writerows(__a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 91 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : int = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'deberta-v2'
def __init__( self : Optional[Any], lowerCamelCase : Optional[int]=12_8100, lowerCamelCase : List[Any]=1536, lowerCamelCase : Dict=24, lowerCamelCase : Any=24, lowerCamelCase : Union[str, Any]=6144, lowerCamelCase : List[Any]="gelu", lowerCamelCase : int=0.1, lowerCamelCase : Union[str, Any]=0.1, lowerCamelCase : Union[str, Any]=512, lowerCamelCase : Optional[Any]=0, lowerCamelCase : Any=0.02, lowerCamelCase : int=1E-7, lowerCamelCase : Union[str, Any]=False, lowerCamelCase : Union[str, Any]=-1, lowerCamelCase : Tuple=0, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : int=None, lowerCamelCase : Dict=0, lowerCamelCase : Tuple="gelu", **lowerCamelCase : Optional[int], )-> Union[str, Any]:
super().__init__(**lowerCamelCase )
lowerCamelCase__ : str =hidden_size
lowerCamelCase__ : Optional[int] =num_hidden_layers
lowerCamelCase__ : Optional[Any] =num_attention_heads
lowerCamelCase__ : List[Any] =intermediate_size
lowerCamelCase__ : int =hidden_act
lowerCamelCase__ : Tuple =hidden_dropout_prob
lowerCamelCase__ : Union[str, Any] =attention_probs_dropout_prob
lowerCamelCase__ : Optional[Any] =max_position_embeddings
lowerCamelCase__ : int =type_vocab_size
lowerCamelCase__ : Tuple =initializer_range
lowerCamelCase__ : Tuple =relative_attention
lowerCamelCase__ : Optional[Any] =max_relative_positions
lowerCamelCase__ : List[Any] =pad_token_id
lowerCamelCase__ : int =position_biased_input
# Backwards compatibility
if type(lowerCamelCase ) == str:
lowerCamelCase__ : Union[str, Any] =[x.strip() for x in pos_att_type.lower().split('''|''' )]
lowerCamelCase__ : Tuple =pos_att_type
lowerCamelCase__ : Union[str, Any] =vocab_size
lowerCamelCase__ : Optional[int] =layer_norm_eps
lowerCamelCase__ : Dict =kwargs.get('''pooler_hidden_size''', lowerCamelCase )
lowerCamelCase__ : Tuple =pooler_dropout
lowerCamelCase__ : List[Any] =pooler_hidden_act
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def snake_case ( self : List[str] )-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase__ : Union[str, Any] ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase__ : Any ={0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def snake_case ( self : List[str] )-> int:
return 12
def snake_case ( self : str, lowerCamelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional["TensorType"] = None, lowerCamelCase : int = 3, lowerCamelCase : int = 40, lowerCamelCase : int = 40, lowerCamelCase : "PreTrainedTokenizerBase" = None, )-> Mapping[str, Any]:
lowerCamelCase__ : List[Any] =super().generate_dummy_inputs(preprocessor=lowerCamelCase, framework=lowerCamelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 238 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def a( A : List[Any]=None , A : List[str]=None ) -> Tuple:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=A )
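# Added note: dataclasses reject mutable defaults outright, so this helper routes the
# default through `default_factory`; note the lambda returns the same underlying
# object on every call rather than a fresh copy.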
@dataclass
class _lowercase :
"""simple docstring"""
__A = field(
metadata={"help": "The csv file to plot."}, )
__A = field(
default=lowerCAmelCase, metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."}, )
__A = field(
default=lowerCAmelCase, metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."}, )
__A = field(
default=lowerCAmelCase, metadata={"help": "Disable logarithmic scale when plotting"}, )
__A = field(
default=lowerCAmelCase, metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
}, )
__A = field(
default=lowerCAmelCase, metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."}, )
__A = list_field(
default=lowerCAmelCase, metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def a( A : Dict ) -> List[Any]:
"""simple docstring"""
try:
int(A )
return True
except ValueError:
return False
def a( A : Any ) -> str:
"""simple docstring"""
try:
float(A )
return True
except ValueError:
return False
class _lowercase :
"""simple docstring"""
def __init__(self , lowerCamelCase_ ):
"""simple docstring"""
a = args
a = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="" ) as csv_file:
a = csv.DictReader(lowerCamelCase_ )
for row in reader:
a = row["model"]
self.result_dict[model_name]["bsz"].append(int(row["batch_size"] ) )
self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"] ) )
if can_convert_to_int(row["result"] ):
# value is not None
a = int(row["result"] )
elif can_convert_to_float(row["result"] ):
# value is not None
a = float(row["result"] )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = plt.subplots()
a = "Time usage" if self.args.is_time else "Memory usage"
a = title_str + " for training" if self.args.is_train else title_str + " for inference"
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("log" )
ax.set_yscale("log" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
a = sorted(set(self.result_dict[model_name]["bsz"] ) )
a = sorted(set(self.result_dict[model_name]["seq_len"] ) )
a = self.result_dict[model_name]["result"]
((a) , (a)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
a = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
a = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCamelCase_ , )
else:
a = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((a) , (a)) = (
("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
)
a = np.asarray(lowerCamelCase_ , lowerCamelCase_ )[: len(lowerCamelCase_ )]
plt.scatter(
lowerCamelCase_ , lowerCamelCase_ , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(lowerCamelCase_ , lowerCamelCase_ , "--" )
title_str += F''' {label_model_name} vs.'''
a = title_str[:-4]
a = "Time in s" if self.args.is_time else "Memory in MB"
# plot
plt.title(lowerCamelCase_ )
plt.xlabel(lowerCamelCase_ )
plt.ylabel(lowerCamelCase_ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def a( ) -> List[str]:
"""simple docstring"""
a = HfArgumentParser(A )
a = parser.parse_args_into_dataclasses()[0]
a = Plot(args=A )
plot.plot()
if __name__ == "__main__":
main()
| 71 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
_lowercase: Any = None
_lowercase: Optional[int] = logging.get_logger(__name__)
_lowercase: str = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
_lowercase: Tuple = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
_lowercase: List[Any] = {
"google/rembert": 256,
}
_lowercase: Dict = "▁"
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = RemBertTokenizer
def __init__(self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_="[CLS]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="<unk>" , lowerCamelCase_="[SEP]" , lowerCamelCase_="<pad>" , lowerCamelCase_="[CLS]" , lowerCamelCase_="[MASK]" , **lowerCamelCase_ , ):
"""simple docstring"""
a = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , )
a = do_lower_case
a = remove_space
a = keep_accents
a = vocab_file
a = False if not self.vocab_file else True
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ):
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
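# Added example: a single sequence becomes `[CLS] X [SEP]`; a pair becomes
# `[CLS] A [SEP] B [SEP]`, matching the two return paths above.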
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ):
"""simple docstring"""
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error("Vocabulary path ({}) should be a directory".format(lowerCamelCase_ ) )
return
a = os.path.join(
lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file , lowerCamelCase_ )
return (out_vocab_file,)
| 71 | 1 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )-> float | int:
"""simple docstring"""
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
_UpperCAmelCase = cst_fwd.get(__lowerCAmelCase , np.inf )
_UpperCAmelCase = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
_UpperCAmelCase = new_cost_f
_UpperCAmelCase = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
_UpperCAmelCase = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> int:
"""simple docstring"""
_UpperCAmelCase = -1
_UpperCAmelCase = set()
_UpperCAmelCase = set()
_UpperCAmelCase = {source: 0}
_UpperCAmelCase = {destination: 0}
_UpperCAmelCase = {source: None}
_UpperCAmelCase = {destination: None}
_UpperCAmelCase = PriorityQueue()
_UpperCAmelCase = PriorityQueue()
_UpperCAmelCase = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
_UpperCAmelCase , _UpperCAmelCase = queue_forward.get()
visited_forward.add(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = queue_backward.get()
visited_backward.add(__lowerCAmelCase )
_UpperCAmelCase = pass_and_relaxation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
_UpperCAmelCase = pass_and_relaxation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
_UpperCAmelCase = shortest_distance
return shortest_path_distance
_a = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
_a = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
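# Added note: with the demo graphs above, the cheapest E -> F route is E -> G -> F at
# total cost 2 + 1 = 3, which beats E -> B -> C -> D -> F at cost 4.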
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> Tuple:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=__UpperCamelCase , )
assert hasattr(self , 'env' )
def __a ( self , __UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
snake_case__ : Tuple = {
'enabled': True,
'processes_per_host': 8,
}
snake_case__ : Any = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
snake_case__ : Optional[int] = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
snake_case__ : int = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCamelCase , hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 500,
} , metric_definitions=self.env.metric_definitions , distribution=__UpperCamelCase , py_version='py36' , )
def __a ( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
TrainingJobAnalytics(__UpperCamelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __a ( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = self.create_estimator(__UpperCamelCase )
# run training
estimator.fit()
# result dataframe
snake_case__ : Dict = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case__ : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
snake_case__ : List[str] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
snake_case__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __UpperCamelCase )
| 143 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Tuple = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
__A : Dict = 'src/transformers'
# Matches is_xxx_available()
__A : Dict = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
__A : Any = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__A : Tuple = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
__A : Optional[Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
__A : Optional[int] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__A : List[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
__A : Union[str, Any] = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
__A : int = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
__A : int = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
__A : List[Any] = re.compile(r'^\s*try:')
# Catches a line with else:
__A : Any = re.compile(r'^\s*else:')
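# Illustrative matches for the patterns above (added examples, not from the source tree):
#   "if not is_torch_available():"                    -> backend name "torch"
#   '_import_structure["models"].append("FooModel")'  -> object "FooModel"
#   '        "FooModel",'                             -> object "FooModel"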
def UpperCAmelCase ( lowerCamelCase_ :str ):
'''simple docstring'''
if _re_test_backend.search(lowerCamelCase_ ) is None:
return None
snake_case_ : Tuple = [b[0] for b in _re_backend.findall(lowerCamelCase_ )]
backends.sort()
return "_and_".join(lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ):
'''simple docstring'''
with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case_ : str = f.readlines()
snake_case_ : List[Any] = 0
while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCamelCase_ ):
return None
# First grab the objects without a specific backend in _import_structure
snake_case_ : Union[str, Any] = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
snake_case_ : str = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCamelCase_ ):
snake_case_ : Optional[int] = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0]
snake_case_ : Union[str, Any] = re.findall(R"""\[([^\]]+)\]""" , lowerCamelCase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
snake_case_ : Any = _re_import_struct_key_value.search(lowerCamelCase_ )
if single_line_import_search is not None:
snake_case_ : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase_ ) > 0]
objects.extend(lowerCamelCase_ )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
snake_case_ : Union[str, Any] = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
snake_case_ : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
snake_case_ : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
snake_case_ : Dict = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
snake_case_ : List[Any] = lines[line_index]
if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None:
snake_case_ : Optional[int] = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(""", """ )
snake_case_ : List[str] = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0]
objects.extend(lowerCamelCase_ )
elif _re_between_brackets.search(lowerCamelCase_ ) is not None:
snake_case_ : List[str] = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(""", """ )
snake_case_ : Any = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0]
objects.extend(lowerCamelCase_ )
elif _re_quote_object.search(lowerCamelCase_ ) is not None:
objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
snake_case_ : int = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
snake_case_ : List[Any] = []
while (
line_index < len(lowerCamelCase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
snake_case_ : Union[str, Any] = lines[line_index]
snake_case_ : Union[str, Any] = _re_import.search(lowerCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
snake_case_ : Dict = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCamelCase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
snake_case_ : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
snake_case_ : str = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
snake_case_ : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
snake_case_ : Dict = lines[line_index]
snake_case_ : Any = _re_import.search(lowerCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
snake_case_ : int = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[str] ):
'''simple docstring'''
def find_duplicates(lowerCamelCase_ :Union[str, Any] ):
return [k for k, v in collections.Counter(lowerCamelCase_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
snake_case_ : Optional[int] = []
for key in import_dict_objects.keys():
snake_case_ : int = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
snake_case_ : List[str] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
snake_case_ : str = """base imports""" if key == """none""" else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
snake_case_ : Any = os.path.join(lowerCamelCase_ , """__init__.py""" )
snake_case_ : Dict = parse_init(lowerCamelCase_ )
if objects is not None:
snake_case_ : Any = analyze_results(*lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
snake_case_ : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("""\n""".join(lowerCamelCase_ ) )
if len(lowerCamelCase_ ) > 0:
raise ValueError("""\n\n""".join(lowerCamelCase_ ) )
def UpperCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = []
for path, directories, files in os.walk(lowerCamelCase_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(lowerCamelCase_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCamelCase_ ) / folder).glob("""*.py""" ) ) ) == 0:
continue
snake_case_ : Tuple = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) )
snake_case_ : List[str] = short_path.replace(os.path.sep , """.""" )
submodules.append(lowerCamelCase_ )
for fname in files:
if fname == "__init__.py":
continue
snake_case_ : Dict = str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) )
snake_case_ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(lowerCamelCase_ )
return submodules
__A : List[Any] = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def UpperCAmelCase ( ):
'''simple docstring'''
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
snake_case_ : Union[str, Any] = direct_transformers_import(lowerCamelCase_ )
snake_case_ : List[str] = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentiall re-) add them.
with open(os.path.join(lowerCamelCase_ , """__init__.py""" ) , """r""" ) as f:
snake_case_ : str = f.read()
import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , lowerCamelCase_ ) ) )
snake_case_ : Dict = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(lowerCamelCase_ ) > 0:
snake_case_ : str = """\n""".join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
F'''{list_of_modules}\n'''
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules() | 8 | 1 |
import logging
import os
from .state import PartialState
class lowercase ( logging.LoggerAdapter ):
@staticmethod
def A__ ( A__):
lowercase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def A__ ( self ,A__ ,A__ ,*A__ ,**A__):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''')
lowercase = kwargs.pop('''main_process_only''' ,snake_case_)
lowercase = kwargs.pop('''in_order''' ,snake_case_)
if self.isEnabledFor(snake_case_):
if self._should_log(snake_case_):
lowercase = self.process(snake_case_ ,snake_case_)
self.logger.log(snake_case_ ,snake_case_ ,*snake_case_ ,**snake_case_)
elif in_order:
lowercase = PartialState()
for i in range(state.num_processes):
if i == state.process_index:
lowercase = self.process(snake_case_ ,snake_case_)
self.logger.log(snake_case_ ,snake_case_ ,*snake_case_ ,**snake_case_)
state.wait_for_everyone()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = None ):
'''simple docstring'''
if log_level is None:
lowercase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , lowerCAmelCase__ )
lowercase = logging.getLogger(lowerCAmelCase__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(lowerCAmelCase__ , {} )
| 101 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
lowercase__ =True
except ImportError:
lowercase__ =False
lowercase__ =logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCamelCase ( lowerCAmelCase__ : Namespace ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class UpperCamelCase__ ( __lowercase ):
@staticmethod
def lowerCAmelCase (snake_case_ : ArgumentParser ):
__a : List[Any] = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=snake_case_ , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=snake_case_ , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=snake_case_ )
def __init__(self : Dict , snake_case_ : bool , snake_case_ : str , snake_case_ : Dict=None , *snake_case_ : Optional[Any] ):
__a : Union[str, Any] = testing
__a : List[Any] = testing_file
__a : Any = path
def lowerCAmelCase (self : int ):
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
__a : Union[str, Any] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:2_2]]
if len(snake_case_ ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
__a : Union[str, Any] = (
Path(snake_case_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
__a : Union[str, Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(snake_case_ ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
__a : List[Any] = json.load(snake_case_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=snake_case_ , extra_context=snake_case_ , )
__a : List[str] = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:2_2]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
__a : Optional[Any] = json.load(snake_case_ )
__a : str = configuration['''lowercase_modelname''']
__a : int = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f"{directory}/configuration.json" )
__a : Any = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
__a : Dict = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
__a : Optional[int] = '''Flax''' in generate_tensorflow_pytorch_and_flax
__a : Dict = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
os.makedirs(snake_case_ , exist_ok=snake_case_ )
os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}" , exist_ok=snake_case_ )
# Tests require submodules as they have parent imports
with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py" , '''w''' ):
pass
shutil.move(
f"{directory}/__init__.py" , f"{model_dir}/__init__.py" , )
shutil.move(
f"{directory}/configuration_{lowercase_model_name}.py" , f"{model_dir}/configuration_{lowercase_model_name}.py" , )
def remove_copy_lines(snake_case_ : Union[str, Any] ):
with open(snake_case_ , '''r''' ) as f:
__a : Union[str, Any] = f.readlines()
with open(snake_case_ , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(snake_case_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_{lowercase_model_name}.py" , f"{model_dir}/modeling_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_tf_{lowercase_model_name}.py" , f"{model_dir}/modeling_tf_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_tf_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py" )
if output_flax:
if not self._testing:
remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/modeling_flax_{lowercase_model_name}.py" , f"{model_dir}/modeling_flax_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/test_modeling_flax_{lowercase_model_name}.py" , f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py" , )
else:
os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py" )
os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py" )
shutil.move(
f"{directory}/{lowercase_model_name}.md" , f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md" , )
shutil.move(
f"{directory}/tokenization_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}.py" , )
shutil.move(
f"{directory}/tokenization_fast_{lowercase_model_name}.py" , f"{model_dir}/tokenization_{lowercase_model_name}_fast.py" , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(snake_case_ : str , snake_case_ : str , snake_case_ : List[str] ):
# Create temp file
__a , __a : Tuple = mkstemp()
__a : Optional[Any] = False
with fdopen(snake_case_ , '''w''' ) as new_file:
with open(snake_case_ ) as old_file:
for line in old_file:
new_file.write(snake_case_ )
if line_to_copy_below in line:
__a : Tuple = True
for line_to_copy in lines_to_copy:
new_file.write(snake_case_ )
if not line_found:
raise ValueError(f"Line {line_to_copy_below} was not found in file." )
# Copy the file permissions from the old file to the new file
copymode(snake_case_ , snake_case_ )
# Remove original file
remove(snake_case_ )
# Move new file
move(snake_case_ , snake_case_ )
def skip_units(snake_case_ : Any ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(snake_case_ : int ):
with open(snake_case_ ) as datafile:
__a : List[Any] = []
__a : int = False
__a : Tuple = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
__a : Optional[Any] = line.split('''"''' )[1]
__a : Dict = skip_units(snake_case_ )
elif "# Below: " in line and "##" not in line:
__a : str = line.split('''"''' )[1]
__a : Any = skip_units(snake_case_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(snake_case_ , snake_case_ , snake_case_ )
__a : str = []
elif "# Replace with" in line and "##" not in line:
__a : Optional[int] = []
elif "##" not in line:
lines_to_copy.append(snake_case_ )
remove(snake_case_ )
replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py" )
os.rmdir(snake_case_ )
| 216 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]:
# load base model
__snake_case: Optional[int] = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=torch.floataa)
# load LoRA weight from .safetensors
__snake_case: List[str] = load_file(a_)
__snake_case: Optional[Any] = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
__snake_case: Tuple = key.split(""".""")[0].split(LORA_PREFIX_TEXT_ENCODER + """_""")[-1].split("""_""")
__snake_case: int = pipeline.text_encoder
else:
__snake_case: Optional[Any] = key.split(""".""")[0].split(LORA_PREFIX_UNET + """_""")[-1].split("""_""")
__snake_case: Tuple = pipeline.unet
# find the target layer
__snake_case: Union[str, Any] = layer_infos.pop(0)
while len(a_) > -1:
try:
__snake_case: List[Any] = curr_layer.__getattr__(a_)
if len(a_) > 0:
__snake_case: List[str] = layer_infos.pop(0)
elif len(a_) == 0:
break
except Exception:
if len(a_) > 0:
temp_name += "_" + layer_infos.pop(0)
else:
__snake_case: str = layer_infos.pop(0)
__snake_case: Dict = []
if "lora_down" in key:
pair_keys.append(key.replace("""lora_down""" , """lora_up"""))
pair_keys.append(a_)
else:
pair_keys.append(a_)
pair_keys.append(key.replace("""lora_up""" , """lora_down"""))
# update weight
if len(state_dict[pair_keys[0]].shape) == 4:
__snake_case: Union[str, Any] = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.floataa)
__snake_case: Dict = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.floataa)
curr_layer.weight.data += alpha * torch.mm(a_ , a_).unsqueeze(2).unsqueeze(3)
else:
__snake_case: List[str] = state_dict[pair_keys[0]].to(torch.floataa)
__snake_case: Optional[Any] = state_dict[pair_keys[1]].to(torch.floataa)
curr_layer.weight.data += alpha * torch.mm(a_ , a_)
# update visited list
for item in pair_keys:
visited.append(a_)
return pipeline
if __name__ == "__main__":
__UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
__UpperCAmelCase : str = parser.parse_args()
__UpperCAmelCase : Any = args.base_model_path
__UpperCAmelCase : List[Any] = args.checkpoint_path
__UpperCAmelCase : int = args.dump_path
__UpperCAmelCase : List[Any] = args.lora_prefix_unet
__UpperCAmelCase : int = args.lora_prefix_text_encoder
__UpperCAmelCase : Optional[int] = args.alpha
__UpperCAmelCase : Optional[Any] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
__UpperCAmelCase : str = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 367 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """num_attention_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self : int , A : str , A : Dict=13 , A : str=32 , A : Any=2 , A : Optional[Any]=3 , A : str=640 , A : Tuple=4 , A : Dict="silu" , A : List[Any]=3 , A : Any=32 , A : Any=0.1 , A : int=0.1 , A : Dict=0.1 , A : Optional[Any]=0.02 , A : List[Any]=True , A : Tuple=True , A : Any=10 , A : Optional[int]=None , ):
__snake_case: List[Any] = parent
__snake_case: Dict = batch_size
__snake_case: int = image_size
__snake_case: Tuple = patch_size
__snake_case: Tuple = num_channels
__snake_case: str = last_hidden_size
__snake_case: Dict = num_attention_heads
__snake_case: Dict = hidden_act
__snake_case: Tuple = conv_kernel_size
__snake_case: List[str] = output_stride
__snake_case: List[str] = hidden_dropout_prob
__snake_case: Optional[Any] = attention_probs_dropout_prob
__snake_case: int = classifier_dropout_prob
__snake_case: List[Any] = use_labels
__snake_case: Union[str, Any] = is_training
__snake_case: Union[str, Any] = num_labels
__snake_case: str = initializer_range
__snake_case: List[Any] = scope
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case: Tuple = None
__snake_case: Any = None
if self.use_labels:
__snake_case: Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case: str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case: Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase__ ( self : int ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : str , A : Optional[Any] , A : Any , A : Any , A : Union[str, Any] ):
__snake_case: List[Any] = MobileViTModel(config=A )
model.to(A )
model.eval()
__snake_case: int = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : str , A : List[Any] , A : Any , A : Any , A : int ):
__snake_case: str = self.num_labels
__snake_case: Optional[int] = MobileViTForImageClassification(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[Any] , A : int , A : str ):
__snake_case: List[Any] = self.num_labels
__snake_case: Dict = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case: Tuple = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Tuple = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case: Any = config_and_inputs
__snake_case: Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = MobileViTModelTester(self )
__snake_case: str = MobileViTConfigTester(self , config_class=A , has_text_modality=A )
def UpperCAmelCase__ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[Any] ):
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def UpperCAmelCase__ ( self : Dict ):
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = model_class(A )
__snake_case: int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case: Optional[int] = [*signature.parameters.keys()]
__snake_case: List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ ( self : Dict ):
def check_hidden_states_output(A : List[Any] , A : int , A : Tuple ):
__snake_case: List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
__snake_case: str = model(**self._prepare_for_class(A , A ) )
__snake_case: Optional[int] = outputs.hidden_states
__snake_case: Any = 5
self.assertEqual(len(A ) , A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case: Union[str, Any] = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case: Dict = True
check_hidden_states_output(A , A , A )
def UpperCAmelCase__ ( self : int ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case: List[Any] = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def A__ ( ) -> Optional[int]:
__snake_case: Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase__ ( self : Dict ):
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Tuple = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(A )
__snake_case: str = self.default_image_processor
__snake_case: Optional[Any] = prepare_img()
__snake_case: List[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
# verify the logits
__snake_case: List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , A )
__snake_case: Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = model.to(A )
__snake_case: Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[Any] = prepare_img()
__snake_case: List[str] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: List[Any] = model(**A )
__snake_case: Optional[int] = outputs.logits
# verify the logits
__snake_case: Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , A )
__snake_case: Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Dict ):
__snake_case: int = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: str = model.to(A )
__snake_case: Optional[Any] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = prepare_img()
__snake_case: Optional[int] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
__snake_case: List[Any] = outputs.logits.detach().cpu()
__snake_case: List[str] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(50, 60)] )
__snake_case: str = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , A )
__snake_case: int = image_processor.post_process_semantic_segmentation(outputs=A )
__snake_case: Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , A )
| 293 | 0 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def _snake_case ( lowercase__ : Dict ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = {}
lowerCAmelCase_ :Union[str, Any] = job["""started_at"""]
lowerCAmelCase_ :Dict = job["""completed_at"""]
lowerCAmelCase_ :Union[str, Any] = date_parser.parse(lowercase__ )
lowerCAmelCase_ :Any = date_parser.parse(lowercase__ )
lowerCAmelCase_ :List[str] = round((end_datetime - start_datetime).total_seconds() / 60.0 )
lowerCAmelCase_ :List[Any] = start
lowerCAmelCase_ :Tuple = end
lowerCAmelCase_ :List[str] = duration_in_min
return job_info
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Dict=None ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :int = None
if token is not None:
lowerCAmelCase_ :Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
lowerCAmelCase_ :int = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowerCAmelCase_ :Dict = requests.get(lowercase__ , headers=lowercase__ ).json()
lowerCAmelCase_ :Union[str, Any] = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(lowercase__ ) for job in result["""jobs"""]} )
lowerCAmelCase_ :List[Any] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(lowercase__ ):
lowerCAmelCase_ :Tuple = requests.get(url + f"""&page={i + 2}""" , headers=lowercase__ ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(lowercase__ ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = get_job_time(args.workflow_run_id)
__UpperCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F"""{k}: {v["duration"]}""")
| 84 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
SCREAMING_SNAKE_CASE_ : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE_ : List[str] = {
'RUCAIBox/mvp': 1_0_2_4,
}
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = MvpTokenizer
def __init__( self: Dict , UpperCamelCase: List[str]=None , UpperCamelCase: Optional[int]=None , UpperCamelCase: Union[str, Any]=None , UpperCamelCase: Tuple="replace" , UpperCamelCase: Dict="<s>" , UpperCamelCase: List[str]="</s>" , UpperCamelCase: Union[str, Any]="</s>" , UpperCamelCase: Optional[Any]="<s>" , UpperCamelCase: Optional[int]="<unk>" , UpperCamelCase: Optional[Any]="<pad>" , UpperCamelCase: int="<mask>" , UpperCamelCase: int=False , UpperCamelCase: int=True , **UpperCamelCase: Any , ):
"""simple docstring"""
super().__init__(
UpperCamelCase , UpperCamelCase , tokenizer_file=UpperCamelCase , errors=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , add_prefix_space=UpperCamelCase , trim_offsets=UpperCamelCase , **UpperCamelCase , )
A__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase ) != add_prefix_space:
A__ = getattr(UpperCamelCase , pre_tok_state.pop("""type""" ) )
A__ = add_prefix_space
A__ = pre_tok_class(**UpperCamelCase )
A__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A__ = """post_processor"""
A__ = getattr(self.backend_tokenizer , UpperCamelCase , UpperCamelCase )
if tokenizer_component_instance:
A__ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A__ = tuple(state["""sep"""] )
if "cls" in state:
A__ = tuple(state["""cls"""] )
A__ = False
if state.get("""add_prefix_space""" , UpperCamelCase ) != add_prefix_space:
A__ = add_prefix_space
A__ = True
if state.get("""trim_offsets""" , UpperCamelCase ) != trim_offsets:
A__ = trim_offsets
A__ = True
if changes_to_apply:
A__ = getattr(UpperCamelCase , state.pop("""type""" ) )
A__ = component_class(**UpperCamelCase )
setattr(self.backend_tokenizer , UpperCamelCase , UpperCamelCase )
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
A__ = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else value
A__ = value
def UpperCamelCase ( self: int , *UpperCamelCase: Union[str, Any] , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = kwargs.get("""is_split_into_words""" , UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: int , *UpperCamelCase: Tuple , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
A__ = kwargs.get("""is_split_into_words""" , UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: List[str] , UpperCamelCase: str , UpperCamelCase: Optional[str] = None ):
"""simple docstring"""
A__ = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
def UpperCamelCase ( self: List[str] , UpperCamelCase: List[Any] , UpperCamelCase: Optional[int]=None ):
"""simple docstring"""
A__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self: List[Any] , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 69 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def _snake_case ( UpperCAmelCase_ : int="ro" , UpperCAmelCase_ : Optional[int]="en" , UpperCAmelCase_ : List[Any]="wmt16" , UpperCAmelCase_ : str=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
A__ = F"""{src_lang}-{tgt_lang}"""
print(F"""Converting {dataset}-{pair}""" )
A__ = datasets.load_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
if save_dir is None:
A__ = F"""{dataset}-{pair}"""
A__ = Path(UpperCAmelCase_ )
save_dir.mkdir(exist_ok=UpperCAmelCase_ )
for split in ds.keys():
print(F"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
A__ = """val""" if split == """validation""" else split
A__ = save_dir.joinpath(F"""{fn}.source""" )
A__ = save_dir.joinpath(F"""{fn}.target""" )
A__ = src_path.open("""w+""" )
A__ = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
A__ = x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 69 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( a , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ : Tuple =KandinskyVaaControlnetPipeline
UpperCamelCase__ : Dict =["""image_embeds""", """negative_image_embeds""", """hint"""]
UpperCamelCase__ : int =["""image_embeds""", """negative_image_embeds""", """hint"""]
UpperCamelCase__ : List[str] =[
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase__ : Tuple =False
@property
def __lowercase ( self ):
"""simple docstring"""
return 32
@property
def __lowercase ( self ):
"""simple docstring"""
return 32
@property
def __lowercase ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def __lowercase ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowercase ( self ):
"""simple docstring"""
return 100
@property
def __lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : Tuple ={
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__UpperCamelCase : Optional[Any] =UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def __lowercase ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : List[Any] =VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =self.dummy_unet
__UpperCamelCase : List[Any] =self.dummy_movq
__UpperCamelCase : int =DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , prediction_type='epsilon' , thresholding=lowerCamelCase__ , )
__UpperCamelCase : Optional[int] ={
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
__UpperCamelCase : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
__UpperCamelCase : List[str] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
# create hint
__UpperCamelCase : List[str] =floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith('mps' ):
__UpperCamelCase : Optional[int] =torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : List[Any] =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : Tuple ={
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str ='cpu'
__UpperCamelCase : Tuple =self.get_dummy_components()
__UpperCamelCase : Any =self.pipeline_class(**lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
__UpperCamelCase : Union[str, Any] =output.images
__UpperCamelCase : Optional[Any] =pipe(
**self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0]
__UpperCamelCase : Tuple =image[0, -3:, -3:, -1]
__UpperCamelCase : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : List[Any] =np.array(
[0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
__UpperCamelCase : Union[str, Any] =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
__UpperCamelCase : Any =torch.from_numpy(np.array(lowerCamelCase__ ) ).float() / 255.0
__UpperCamelCase : int =hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__UpperCamelCase : Dict =KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
__UpperCamelCase : Dict =KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
__UpperCamelCase : List[Any] =pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Tuple ='A robot, 4k photo'
__UpperCamelCase : Optional[int] =torch.Generator(device='cuda' ).manual_seed(0 )
__UpperCamelCase , __UpperCamelCase : List[str] =pipe_prior(
lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__UpperCamelCase : Any =torch.Generator(device='cuda' ).manual_seed(0 )
__UpperCamelCase : str =pipeline(
image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , hint=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=100 , output_type='np' , )
__UpperCamelCase : Optional[int] =output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
| 71 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
A_ :Optional[Any] = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def A ( a_ ) -> List[Any]:
__UpperCamelCase : Any =['layers', 'blocks']
for k in ignore_keys:
state_dict.pop(a_ ,a_ )
A_ :int = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
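
# For example, the original checkpoint key "decoder.blocks.0.mlp.0.weight"
# becomes "decoder.layers.0.fc1.weight" after applying WHISPER_MAPPING.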
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = os.path.expanduser("~/.cache/whisper")) -> bytes:
    # NOTE: the default cache directory is an assumption added here so the
    # single-argument call in the converter below works; the original call
    # site passed no root.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1_024
        ) as loop:
            while True:
                buffer = source.read(8_192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
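
# Note: each download URL embeds the expected SHA-256 digest as its
# second-to-last path component, which is what `url.split("/")[-2]` extracts.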
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # _download returns raw bytes, so the checkpoint still has to be
        # deserialized with torch.load before its dims/state dict are usable.
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],  # head count, not the n_text_state hidden size
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
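
# Example invocation (output directory is hypothetical):
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny
# `--checkpoint_path` accepts either a key of _MODELS (e.g. "tiny") or a path to a local .pt file.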
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a VisualBERT model.
    """

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
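
# Hedged usage sketch (values follow the defaults above):
# config = VisualBertConfig(visual_embedding_dim=1_024)
# assert config.hidden_size == 768 and config.visual_embedding_dim == 1_024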
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` so that, per column, the kept probabilities sum to at most `truncation_rate`;
        the lowest probabilities beyond that budget are zeroed out (set to log(0)).
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
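
# Hedged usage sketch; the checkpoint name is an assumption, not taken from this file:
# from diffusers import VQDiffusionPipeline
# pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
# image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]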
| 229 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
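
# Note: _LazyModule installs itself in sys.modules in place of this module, so
# the heavy framework imports listed above only run when an attribute such as
# Blip2Model is first accessed.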
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
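
# Hedged usage sketch; the call signature is assumed from the PipelineTool base class:
# tool = TextSummarizationTool()
# print(tool("A long English text to summarize ..."))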
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        """test for the Vector.component() method"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """test for the Vector.__str__() method"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """test for the Vector.__len__() method"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """test for the Vector.euclidean_length() method"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """test for the Vector.__add__() method"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """test for the Vector.__sub__() method"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """test for the Vector.__mul__() method (scalar and dot product)"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """test for the zero_vector() function"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """test for the unit_basis_vector() function"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """test for the axpy() operation (y = a*x + y)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """test for the Vector.copy() method"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """test for the Vector.change_component() method"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """test for the Matrix.__str__() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        """test for the Matrix.minor() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """test for the Matrix.cofactor() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """test for the Matrix.determinant() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        """test for the Matrix.__mul__() method (matrix-vector and scalar)"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """test for the Matrix.change_component() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        """test for the Matrix.component() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(a.component(2, 1), 7)

    def test_add_matrix(self) -> None:
        """test for the Matrix.__add__() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        """test for the Matrix.__sub__() method"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """test for the square_zero_matrix() function"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list used for 0-1 breadth-first search."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue

                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    # 0-weight edges go to the front of the deque so the queue
                    # stays sorted by distance (the 0-1 BFS invariant).
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
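
# Hedged usage sketch: a tiny 0-1 weighted graph exercising the deque-based
# BFS above. The example graph is illustrative, not part of the original module.
def _demo() -> None:
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)  # weight-0 edge: pushed to the front of the deque
    g.add_edge(1, 2, 1)  # weight-1 edge: appended to the back
    assert g.get_shortest_path(0, 2) == 1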
if __name__ == "__main__":
import doctest
doctest.testmod()
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    """
    Text classification pipeline using any `ModelForSequenceClassification`.
    """

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
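
# Hedged usage sketch; the checkpoint name is an assumption, not taken from this file:
# from transformers import pipeline
# classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
# classifier("This is great !")  # -> [{"label": "POSITIVE", "score": ...}]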
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of an ALBERT model.
    """

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30_000,
        embedding_size=128,
        hidden_size=4_096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16_384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
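
# Hedged usage sketch (values follow the defaults defined above):
# config = AlbertConfig()
# assert config.hidden_size == 4_096 and config.num_hidden_layers == 12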
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is Hermitian (equal to its own conjugate transpose)."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]])
    v = np.array([[1], [2], [3]])

    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
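
# Worked check for the second test case above: with A = [[1, 2, 4], [2, 3, -1], [4, -1, 1]]
# and v = [1, 2, 3]^T, A v = [17, 5, 5]^T, so v* A v = 1*17 + 2*5 + 3*5 = 42 and
# v* v = 14, giving R(A, v) = 42 / 14 = 3.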
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(in_sentvecs, en_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
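
# In other words, a query counts as correct when its true parallel sentence
# appears among the 10 nearest neighbours by cosine distance; the returned
# value is the fraction of correct queries.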
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
def solution(power: int = 1_000) -> int:
    """Returns the sum of the digits of the number 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
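
# Worked example: solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.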
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """Creates the cache directory for dynamic modules with an init, and adds it to the Python path."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """Creates a dynamic module in the cache directory for modules."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """Get the list of modules that are relatively imported in a module file."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """Get the list of all files that are needed for a given module, following relative imports recursively."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """Check if the current Python environment contains all the libraries that are imported in a file."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def UpperCamelCase_ ( snake_case_ : Dict , snake_case_ : Optional[int] ) -> List[str]:
'''simple docstring'''
__lowerCAmelCase = module_path.replace(os.path.sep , """.""" )
__lowerCAmelCase = importlib.import_module(snake_case_ )
if class_name is None:
return find_pipeline_class(snake_case_ )
return getattr(snake_case_ , snake_case_ )
def UpperCamelCase_ ( snake_case_ : str ) -> Optional[Any]:
'''simple docstring'''
from ..pipelines import DiffusionPipeline
__lowerCAmelCase = dict(inspect.getmembers(snake_case_ , inspect.isclass ) )
__lowerCAmelCase = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , snake_case_ )
and cls.__module__.split(""".""" )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
__lowerCAmelCase = cls
return pipeline_class
def UpperCamelCase_ ( snake_case_ : Union[str, os.PathLike] , snake_case_ : str , snake_case_ : Optional[Union[str, os.PathLike]] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : Optional[Dict[str, str]] = None , snake_case_ : Optional[Union[bool, str]] = None , snake_case_ : Optional[str] = None , snake_case_ : bool = False , ) -> Any:
'''simple docstring'''
__lowerCAmelCase = str(snake_case_ )
__lowerCAmelCase = os.path.join(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
__lowerCAmelCase = module_file_or_url
__lowerCAmelCase = """local"""
elif pretrained_model_name_or_path.count("""/""" ) == 0:
__lowerCAmelCase = get_diffusers_versions()
# cut ".dev0"
__lowerCAmelCase = """v""" + """.""".join(__version__.split(""".""" )[:3] )
# retrieve github version that matches
if revision is None:
__lowerCAmelCase = latest_version if latest_version[1:] in available_versions else """main"""
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
__lowerCAmelCase = f"""v{revision}"""
elif revision == "main":
__lowerCAmelCase = revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {", ".join(available_versions + ["main"] )}.""" )
# community pipeline on GitHub
__lowerCAmelCase = COMMUNITY_PIPELINES_URL.format(revision=snake_case_ , pipeline=snake_case_ )
try:
__lowerCAmelCase = cached_download(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , local_files_only=snake_case_ , use_auth_token=snake_case_ , )
__lowerCAmelCase = """git"""
__lowerCAmelCase = pretrained_model_name_or_path + """.py"""
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
__lowerCAmelCase = hf_hub_download(
snake_case_ , snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , local_files_only=snake_case_ , use_auth_token=snake_case_ , )
__lowerCAmelCase = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
__lowerCAmelCase = check_imports(snake_case_ )
# Now we move the module inside our cached dynamic modules.
__lowerCAmelCase = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(snake_case_ )
__lowerCAmelCase = Path(snake_case_ ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(snake_case_ , submodule_path / module_file )
for module_needed in modules_needed:
__lowerCAmelCase = f"""{module_needed}.py"""
shutil.copy(os.path.join(snake_case_ , snake_case_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(snake_case_ , snake_case_ ):
__lowerCAmelCase = use_auth_token
elif use_auth_token is True:
__lowerCAmelCase = HfFolder.get_token()
else:
__lowerCAmelCase = None
__lowerCAmelCase = model_info(snake_case_ , revision=snake_case_ , token=snake_case_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
__lowerCAmelCase = submodule_path / commit_hash
__lowerCAmelCase = full_submodule + os.path.sep + commit_hash
create_dynamic_module(snake_case_ )
if not (submodule_path / module_file).exists():
shutil.copy(snake_case_ , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
snake_case_ , f"""{module_needed}.py""" , cache_dir=snake_case_ , force_download=snake_case_ , resume_download=snake_case_ , proxies=snake_case_ , use_auth_token=snake_case_ , revision=snake_case_ , local_files_only=snake_case_ , )
return os.path.join(snake_case_ , snake_case_ )
def UpperCamelCase_ ( snake_case_ : Union[str, os.PathLike] , snake_case_ : str , snake_case_ : Optional[str] = None , snake_case_ : Optional[Union[str, os.PathLike]] = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : Optional[Dict[str, str]] = None , snake_case_ : Optional[Union[bool, str]] = None , snake_case_ : Optional[str] = None , snake_case_ : bool = False , **snake_case_ : Dict , ) -> Tuple:
'''simple docstring'''
__lowerCAmelCase = get_cached_module_file(
snake_case_ , snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , resume_download=snake_case_ , proxies=snake_case_ , use_auth_token=snake_case_ , revision=snake_case_ , local_files_only=snake_case_ , )
return get_class_in_module(snake_case_ , final_module.replace(""".py""" , """""" ) )
| 229 | 0 |
from typing import List
import numpy as np
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = {key: len(lowerCamelCase__ ) for key, value in gen_kwargs.items() if isinstance(lowerCamelCase__ , lowerCamelCase__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items() )
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
) )
lowerCamelCase_ = max(lists_lengths.values() , default=0 )
return max(1 , lowerCamelCase__ )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = []
for group_idx in range(lowerCamelCase__ ):
lowerCamelCase_ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
lowerCamelCase_ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
lowerCamelCase_ = range(lowerCamelCase__ , start + num_shards_to_add )
shards_indices_per_group.append(lowerCamelCase__ )
return shards_indices_per_group
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = _number_of_shards_in_gen_kwargs(lowerCamelCase__ )
if num_shards == 1:
return [dict(lowerCamelCase__ )]
else:
lowerCamelCase_ = _distribute_shards(num_shards=lowerCamelCase__ , max_num_jobs=lowerCamelCase__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(lowerCamelCase__ ) )
]
def lowerCamelCase_ ( lowerCamelCase__ ):
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , lowerCamelCase__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = {len(lowerCamelCase__ ) for value in gen_kwargs.values() if isinstance(lowerCamelCase__ , lowerCamelCase__ )}
lowerCamelCase_ = {}
for size in list_sizes:
lowerCamelCase_ = list(range(lowerCamelCase__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
lowerCamelCase_ = dict(lowerCamelCase__ )
for key, value in shuffled_kwargs.items():
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ = [value[i] for i in indices_per_size[len(lowerCamelCase__ )]]
return shuffled_kwargs
| 47 |
from __future__ import annotations
import math
def lowerCamelCase_ ( lowerCamelCase__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = str(lowerCamelCase__ )
lowerCamelCase_ = [n]
for i in range(1 , len(lowerCamelCase__ ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def lowerCamelCase_ ( lowerCamelCase__ ):
if len(str(lowerCamelCase__ ) ) > 3:
if not is_prime(int(str(lowerCamelCase__ )[-3:] ) ) or not is_prime(int(str(lowerCamelCase__ )[:3] ) ):
return False
return True
def lowerCamelCase_ ( lowerCamelCase__ = 1_1 ):
lowerCamelCase_ = []
lowerCamelCase_ = 1_3
while len(lowerCamelCase__ ) != count:
if validate(lowerCamelCase__ ):
lowerCamelCase_ = list_truncated_nums(lowerCamelCase__ )
if all(is_prime(lowerCamelCase__ ) for i in list_nums ):
list_truncated_primes.append(lowerCamelCase__ )
num += 2
return list_truncated_primes
def lowerCamelCase_ ( ):
return sum(compute_truncated_primes(1_1 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(1_1)) = }""")
| 47 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowercase = None
_lowercase = logging.get_logger(__name__)
_lowercase = '''▁'''
_lowercase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
_lowercase = {
'''google/pegasus-xsum''': 5_12,
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: int = VOCAB_FILES_NAMES
_lowerCamelCase: Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: Any = PegasusTokenizer
_lowerCamelCase: Dict = ["input_ids", "attention_mask"]
def __init__( self : Tuple ,A_ : str=None ,A_ : Any=None ,A_ : List[Any]="<pad>" ,A_ : List[Any]="</s>" ,A_ : Tuple="<unk>" ,A_ : Any="<mask_2>" ,A_ : List[str]="<mask_1>" ,A_ : List[Any]=None ,A_ : Dict=103 ,**A_ : Optional[Any] ,) -> Dict:
A = offset
if additional_special_tokens is not None:
if not isinstance(A_ ,A_ ):
raise TypeError(
F'additional_special_tokens should be of type {type(A_ )}, but is'
F' {type(A_ )}' )
A = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'<unk_{i}>' for i in range(len(A_ ) ,self.offset - 1 )
]
if len(set(A_ ) ) != len(A_ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
A = additional_special_tokens_extended
else:
A = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'<unk_{i}>' for i in range(2 ,self.offset )]
super().__init__(
A_ ,tokenizer_file=A_ ,pad_token=A_ ,eos_token=A_ ,unk_token=A_ ,mask_token=A_ ,mask_token_sent=A_ ,offset=A_ ,additional_special_tokens=A_ ,**A_ ,)
A = vocab_file
A = False if not self.vocab_file else True
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> int:
A = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
F' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
return [1 if x in all_special_ids else 0 for x in seq]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List ,A_ : Optional[List] = None ,A_ : bool = False ) -> List[Any]:
if already_has_special_tokens:
return self._special_token_mask(A_ )
elif token_ids_a is None:
return self._special_token_mask(A_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Any=None ) -> Optional[Any]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Optional[str] = None ) -> int:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file ,A_ )
return (out_vocab_file,) | 74 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = '''▁'''
_lowerCamelCase : Optional[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Any = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
_lowerCamelCase : Optional[int] = {
'''google/pegasus-xsum''': 512,
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Any = PegasusTokenizer
_UpperCAmelCase : Dict = ["input_ids", "attention_mask"]
def __init__( self : Tuple , lowercase : str=None , lowercase : Any=None , lowercase : List[Any]="<pad>" , lowercase : List[Any]="</s>" , lowercase : Tuple="<unk>" , lowercase : Any="<mask_2>" , lowercase : List[str]="<mask_1>" , lowercase : List[Any]=None , lowercase : Dict=103 , **lowercase : Optional[Any] , ):
'''simple docstring'''
_snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowercase , lowercase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowercase )}, but is'''
f''' {type(lowercase )}''' )
_snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowercase ) , self.offset - 1 )
]
if len(set(lowercase ) ) != len(lowercase ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
_snake_case = additional_special_tokens_extended
else:
_snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowercase , tokenizer_file=lowercase , pad_token=lowercase , eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , **lowercase , )
_snake_case = vocab_file
_snake_case = False if not self.vocab_file else True
def A ( self : List[str] , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def A ( self : List[Any] , lowercase : List , lowercase : Optional[List] = None , lowercase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowercase )
elif token_ids_a is None:
return self._special_token_mask(lowercase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def A ( self : Any , lowercase : Tuple , lowercase : Any=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A ( self : int , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,) | 282 | 0 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def a__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = 9
_UpperCamelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_UpperCamelCase = kruskal(lowercase, lowercase )
_UpperCamelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(lowercase ) == sorted(lowercase )
| 364 |
'''simple docstring'''
from __future__ import annotations
def a__ ( lowercase : list[int], lowercase : list[int], lowercase : int ) -> tuple[float, list[float]]:
"""simple docstring"""
_UpperCamelCase = list(range(len(lowercase ) ) )
_UpperCamelCase = [v / w for v, w in zip(lowercase, lowercase )]
index.sort(key=lambda lowercase : ratio[i], reverse=lowercase )
_UpperCamelCase = 0
_UpperCamelCase = [0] * len(lowercase )
for i in index:
if weight[i] <= capacity:
_UpperCamelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
_UpperCamelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 287 | 0 |
UpperCAmelCase : str = 0 # The first color of the flag.
UpperCAmelCase : Any = 1 # The second color of the flag.
UpperCAmelCase : int = 2 # The third color of the flag.
UpperCAmelCase : List[Any] = (red, white, blue)
def _A ( SCREAMING_SNAKE_CASE : list ):
"""simple docstring"""
if not sequence:
return []
if len(SCREAMING_SNAKE_CASE ) == 1:
return list(SCREAMING_SNAKE_CASE )
a__ : Tuple =0
a__ : Tuple =len(SCREAMING_SNAKE_CASE ) - 1
a__ : Optional[int] =0
while mid <= high:
if sequence[mid] == colors[0]:
a__ , a__ : Tuple =sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
a__ , a__ : Dict =sequence[high], sequence[mid]
high -= 1
else:
a__ : Tuple =f'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(SCREAMING_SNAKE_CASE )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase : Optional[Any] = input("""Enter numbers separated by commas:\n""").strip()
UpperCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")]
print(F"""{dutch_national_flag_sort(unsorted)}""")
| 95 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE : list ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) == 0:
return []
a__ , a__ : int =min(SCREAMING_SNAKE_CASE ), max(SCREAMING_SNAKE_CASE )
a__ : Optional[int] =int(max_value - min_value ) + 1
a__ : list[list] =[[] for _ in range(SCREAMING_SNAKE_CASE )]
for i in my_list:
buckets[int(i - min_value )].append(SCREAMING_SNAKE_CASE )
return [v for bucket in buckets for v in sorted(SCREAMING_SNAKE_CASE )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 95 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class __a( __a ):
"""simple docstring"""
lowerCAmelCase = """xglm"""
lowerCAmelCase = ["""past_key_values"""]
lowerCAmelCase = {
"""num_attention_heads""": """attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self ,_SCREAMING_SNAKE_CASE=256_008 ,_SCREAMING_SNAKE_CASE=2_048 ,_SCREAMING_SNAKE_CASE=1_024 ,_SCREAMING_SNAKE_CASE=4_096 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=2 ,**_SCREAMING_SNAKE_CASE ,) -> Union[str, Any]:
UpperCAmelCase_ : str = vocab_size
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : Optional[int] = d_model
UpperCAmelCase_ : List[Any] = ffn_dim
UpperCAmelCase_ : Optional[Any] = num_layers
UpperCAmelCase_ : int = attention_heads
UpperCAmelCase_ : int = activation_function
UpperCAmelCase_ : int = dropout
UpperCAmelCase_ : Any = attention_dropout
UpperCAmelCase_ : int = activation_dropout
UpperCAmelCase_ : Optional[Any] = layerdrop
UpperCAmelCase_ : Optional[Any] = init_std
UpperCAmelCase_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase_ : Optional[Any] = use_cache
super().__init__(
pad_token_id=a__ ,bos_token_id=a__ ,eos_token_id=a__ ,decoder_start_token_id=a__ ,**a__ ,) | 360 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase_ : str = BlipImageProcessor()
UpperCAmelCase_ : Dict = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
UpperCAmelCase_ : Optional[Any] = BlipaProcessor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE ).tokenizer
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE ).image_processor
def a__ ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Tuple = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
UpperCAmelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Dict = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
UpperCAmelCase_ : int = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE ,padding_value=1.0 )
UpperCAmelCase_ : Union[str, Any] = BlipaProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=_SCREAMING_SNAKE_CASE ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Any:
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : str = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[Any] = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : int = processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def a__ ( self ) -> int:
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = '''lower newer'''
UpperCAmelCase_ : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = tokenizer(_SCREAMING_SNAKE_CASE ,return_token_type_ids=_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''lower newer'''
UpperCAmelCase_ : int = self.prepare_image_inputs()
UpperCAmelCase_ : List[str] = processor(text=_SCREAMING_SNAKE_CASE ,images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : List[str] = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : List[str] = processor.batch_decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.get_image_processor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Any = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE ,image_processor=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = '''lower newer'''
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Any = processor(text=_SCREAMING_SNAKE_CASE ,images=_SCREAMING_SNAKE_CASE )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,['''pixel_values''', '''input_ids''', '''attention_mask'''] ) | 235 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : List[Any] = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
snake_case__ : Optional[int] = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(snake_case_ ) , torch_builtin(snake_case_ ) ) )
self.assertFalse(torch.allclose(gelu_python(snake_case_ ) , gelu_new(snake_case_ ) ) )
def lowerCamelCase ( self : int ):
snake_case__ : int = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
snake_case__ : Union[str, Any] = get_activation("""gelu""" )
snake_case__ : Optional[Any] = get_activation("""gelu_10""" )
snake_case__ : Any = torch_builtin(snake_case_ )
snake_case__ : Any = geluaa(snake_case_ )
snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(snake_case_ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCamelCase ( self : Union[str, Any] ):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(snake_case_ ):
get_activation("""bogus""" )
with self.assertRaises(snake_case_ ):
get_activation(snake_case_ )
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Optional[Any] = get_activation("""gelu""" )
snake_case__ : Tuple = 1
snake_case__ : Dict = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(snake_case_ ):
snake_case__ : Union[str, Any] = acta.a
| 35 |
'''simple docstring'''
import string
from math import logaa
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
snake_case__ : List[str] = document.translate(
str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" )
snake_case__ : List[str] = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[int, int]:
snake_case__ : Dict = corpus.lower().translate(
str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with ''
snake_case__ : Any = corpus_without_punctuation.split("""\n""" )
snake_case__ : int = term.lower()
return (len([doc for doc in docs if term in doc] ), len(_lowerCAmelCase ))
def __snake_case( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> float:
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) , 3 )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> float:
return round(tf * idf , 3 )
| 35 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__lowerCAmelCase : int =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
snake_case__ : List[str] = ['input_features', 'attention_mask']
def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=1_6000 , __lowerCAmelCase=80 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase )
lowercase = num_mel_bins
lowercase = do_ceptral_normalize
lowercase = normalize_means
lowercase = normalize_vars
lowercase = True
def A__ ( self , __lowerCAmelCase , ):
"""simple docstring"""
lowercase = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
lowercase = torch.from_numpy(__lowerCAmelCase ).unsqueeze(0 )
lowercase = ta_kaldi.fbank(__lowerCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def A__ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = 0.0 , ):
"""simple docstring"""
if normalize_means:
lowercase = x[:input_length].mean(axis=0 )
lowercase = np.subtract(__lowerCAmelCase , __lowerCAmelCase )
if normalize_vars:
lowercase = x[:input_length].std(axis=0 )
lowercase = np.divide(__lowerCAmelCase , __lowerCAmelCase )
if input_length < x.shape[0]:
lowercase = padding_value
# make sure array is in float32
lowercase = x.astype(np.floataa )
return x
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
lowercase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__lowerCAmelCase , __lowerCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(__lowerCAmelCase , __lowerCAmelCase )
]
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowercase = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowercase = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
lowercase = np.asarray(__lowerCAmelCase , dtype=np.floataa )
elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase = [raw_speech]
# extract fbank features
lowercase = [self._extract_fbank_features(__lowerCAmelCase ) for waveform in raw_speech]
# convert into correct format for padding
lowercase = BatchFeature({"""input_features""": features} )
lowercase = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
# make sure list is in array format
lowercase = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __lowerCAmelCase ):
lowercase = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features]
lowercase = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
lowercase = [np.asarray(__lowerCAmelCase , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowercase = (
np.array(__lowerCAmelCase , dtype=np.intaa )
if self._get_padding_strategies(__lowerCAmelCase , max_length=__lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowercase = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__lowerCAmelCase )
if return_tensors is not None:
lowercase = padded_inputs.convert_to_tensors(__lowerCAmelCase )
return padded_inputs
| 32 | """simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _A ( lowerCAmelCase ):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A__ ( self , __lowerCAmelCase=None ):
"""simple docstring"""
lowercase = {}
if top_k is not None:
lowercase = top_k
return {}, {}, postprocess_params
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = load_image(__lowerCAmelCase )
lowercase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.model(**__lowerCAmelCase )
return model_outputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowercase = self.model.config.num_labels
if self.framework == "pt":
lowercase = model_outputs.logits.softmax(-1 )[0]
lowercase , lowercase = probs.topk(__lowerCAmelCase )
elif self.framework == "tf":
lowercase = stable_softmax(model_outputs.logits , axis=-1 )[0]
lowercase = tf.math.top_k(__lowerCAmelCase , k=__lowerCAmelCase )
lowercase , lowercase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
lowercase = scores.tolist()
lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCAmelCase , __lowerCAmelCase )]
| 32 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A__ :
@staticmethod
def A ( *_a : Optional[Any] , **_a : int ) -> Union[str, Any]:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
A__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def A ( self : Tuple , _a : Union[str, Any] , _a : Optional[int] , _a : int ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
_SCREAMING_SNAKE_CASE =[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def A ( self : List[str] , _a : Any , _a : Tuple ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =object_detector(examples[0] , threshold=0.0 )
_SCREAMING_SNAKE_CASE =len(_a )
self.assertGreater(_a , 0 )
self.assertEqual(
_a , [
{
'score': ANY(_a ),
'label': ANY(_a ),
'box': {'xmin': ANY(_a ), 'ymin': ANY(_a ), 'xmax': ANY(_a ), 'ymax': ANY(_a )},
}
for i in range(_a )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def A ( self : Any ) -> Tuple:
'''simple docstring'''
pass
@require_torch
def A ( self : str ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
_SCREAMING_SNAKE_CASE =object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] , )
_SCREAMING_SNAKE_CASE =object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] , )
@require_torch
@slow
def A ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =pipeline('zero-shot-object-detection' )
_SCREAMING_SNAKE_CASE =object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] , )
_SCREAMING_SNAKE_CASE =object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def A ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
@slow
def A ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =0.2
_SCREAMING_SNAKE_CASE =pipeline('zero-shot-object-detection' )
_SCREAMING_SNAKE_CASE =object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=_a , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] , )
@require_torch
@slow
def A ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =pipeline('zero-shot-object-detection' )
_SCREAMING_SNAKE_CASE =object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=_a , )
self.assertEqual(
nested_simplify(_a , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] , )
| 47 |
'''simple docstring'''
class A__ :
def __init__( self : Union[str, Any] , _a : int ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =[0] * size
_SCREAMING_SNAKE_CASE =[0] * size
@staticmethod
def A ( _a : int ) -> int:
'''simple docstring'''
return index | (index + 1)
@staticmethod
def A ( _a : int ) -> int:
'''simple docstring'''
return (index & (index + 1)) - 1
def A ( self : Tuple , _a : int , _a : int ) -> None:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =value
while index < self.size:
_SCREAMING_SNAKE_CASE =self.get_prev(_a ) + 1
if current_left_border == index:
_SCREAMING_SNAKE_CASE =value
else:
_SCREAMING_SNAKE_CASE =max(_a , _a , _a )
_SCREAMING_SNAKE_CASE =self.get_next(_a )
def A ( self : int , _a : int , _a : int ) -> int:
'''simple docstring'''
right -= 1 # Because of right is exclusive
_SCREAMING_SNAKE_CASE =0
while left <= right:
_SCREAMING_SNAKE_CASE =self.get_prev(_a )
if left <= current_left:
_SCREAMING_SNAKE_CASE =max(_a , self.tree[right] )
_SCREAMING_SNAKE_CASE =current_left
else:
_SCREAMING_SNAKE_CASE =max(_a , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 | 1 |
'''simple docstring'''
from __future__ import annotations
a_ = 8.988E9 # units = N * m^s * C^-2
def _a( UpperCamelCase__ : float, UpperCamelCase__ : float, UpperCamelCase__ : float, UpperCamelCase__ : float ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] =abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
SCREAMING_SNAKE_CASE__ : int =COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
SCREAMING_SNAKE_CASE__ : str =abs(UpperCamelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
SCREAMING_SNAKE_CASE__ : int =abs(UpperCamelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
SCREAMING_SNAKE_CASE__ : Dict =(COULOMBS_CONSTANT * charge_product / abs(UpperCamelCase__ )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod() | 222 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
a_ = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] =Github(os.environ['''GITHUB_TOKEN'''] )
SCREAMING_SNAKE_CASE__ : List[Any] =g.get_repo('''huggingface/transformers''' )
SCREAMING_SNAKE_CASE__ : List[Any] =repo.get_issues(state='''open''' )
for issue in open_issues:
SCREAMING_SNAKE_CASE__ : List[Any] =sorted([comment for comment in issue.get_comments()], key=lambda UpperCamelCase__ : i.created_at, reverse=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict =comments[0] if len(UpperCamelCase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main() | 222 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
a =pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
inspect_dataset(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : List[Any] = path + '.py'
assert script_name in os.listdir(lowerCamelCase__ )
assert "__pycache__" not in os.listdir(lowerCamelCase__ )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
inspect_metric(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Tuple = path + '.py'
assert script_name in os.listdir(lowerCamelCase__ )
assert "__pycache__" not in os.listdir(lowerCamelCase__ )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
__lowerCamelCase : int = get_dataset_config_info(lowerCamelCase__ , config_name=lowerCamelCase__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
with pytest.raises(lowerCamelCase__ ):
get_dataset_config_info(lowerCamelCase__ , config_name=lowerCamelCase__ )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
__lowerCamelCase : Tuple = get_dataset_config_names(lowerCamelCase__ )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
__lowerCamelCase : Tuple = get_dataset_infos(lowerCamelCase__ )
assert list(infos.keys() ) == expected_configs
__lowerCamelCase : List[Any] = expected_configs[0]
assert expected_config in infos
__lowerCamelCase : int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
__lowerCamelCase : str = get_dataset_infos(lowerCamelCase__ )
assert expected_config in infos
__lowerCamelCase : List[str] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
with pytest.raises(lowerCamelCase__ ):
get_dataset_split_names(lowerCamelCase__ , config_name=lowerCamelCase__ )
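# Editor's note: a hedged usage sketch of the inspection helpers exercised by the tests
# above; the dataset and config names come from the parametrize lists and require
# network access to resolve.
from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("paws")                         # ['labeled_final', ...]
splits = get_dataset_split_names("paws", config_name="labeled_final")
print(configs, splits)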
| 73 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase ={
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class A__ ( unittest.TestCase):
@classmethod
def UpperCamelCase__ ( cls ):
lowerCamelCase : int = TOKEN
HfFolder.save_token(__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""test-config""" , use_auth_token=self._token )
lowerCamelCase : Any = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__magic_name__ , repo_id="""test-config""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : Optional[Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
lowerCamelCase : Optional[int] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__magic_name__ , repo_id="""valid_org/test-config-org""" , push_to_hub=__magic_name__ , use_auth_token=self._token )
lowerCamelCase : List[str] = BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
def UpperCamelCase__ ( self ):
CustomConfig.register_for_auto_class()
lowerCamelCase : Optional[Any] = CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
lowerCamelCase : List[str] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__magic_name__ )
# Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
self.assertEqual(new_config.attribute , 4_2 )
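# Editor's note: the push/reload round trip verified by the staging tests above, reduced
# to a hedged sketch. The repo id and token are placeholders, and `use_auth_token`
# mirrors the (older) API spelling used in this file.
def sketch_config_roundtrip(token):
    config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5,
                        num_attention_heads=4, intermediate_size=37)
    config.push_to_hub("test-config", use_auth_token=token)       # requires Hub credentials
    return BertConfig.from_pretrained("<your-username>/test-config")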
class A__ ( unittest.TestCase):
def UpperCamelCase__ ( self ):
lowerCamelCase : str = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase : Optional[int] = c.n_embd + 1 # int
lowerCamelCase : Optional[int] = c.resid_pdrop + 1.0 # float
lowerCamelCase : Tuple = not c.scale_attn_weights # bool
lowerCamelCase : Any = c.summary_type + """foo""" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__magic_name__ , c.n_embd , """mismatch for key: n_embd""" )
self.assertEqual(__magic_name__ , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
self.assertEqual(__magic_name__ , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
self.assertEqual(__magic_name__ , c.summary_type , """mismatch for key: summary_type""" )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = PretrainedConfig()
lowerCamelCase : int = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__magic_name__ , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
lowerCamelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(__magic_name__ , __magic_name__ )]
if len(__magic_name__ ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
F''' {", ".join(__magic_name__ )}.''' )
def UpperCamelCase__ ( self ):
with self.assertRaises(__magic_name__ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase : Dict = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
lowerCamelCase : str = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ):
# A mock response for an HTTP head request to emulate server down
lowerCamelCase : Dict = mock.Mock()
lowerCamelCase : Optional[int] = 5_0_0
lowerCamelCase : List[Any] = {}
lowerCamelCase : Tuple = HTTPError
lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
lowerCamelCase : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__magic_name__ ) as mock_head:
lowerCamelCase : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ):
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase : List[str] = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase__ ( self ):
configuration = AutoConfig.from_pretrained("""bert-base-cased""" )
configuration.configuration_files = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
    configuration.save_pretrained(tmp_dir )
    configuration.hidden_size = 2
    json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , """config.4.0.0.json""" ) , """w""" ) )
    # This should pick the new configuration file as the version of Transformers is > 4.0.0
    new_configuration = AutoConfig.from_pretrained(tmp_dir )
    self.assertEqual(new_configuration.hidden_size , 2 )
    # Will need to be adjusted if we reach v42 and this test is still here.
    # Should pick the old configuration file as the version of Transformers is < 4.42.0
    configuration.configuration_files = ["""config.42.0.0.json"""]
    configuration.hidden_size = 7_6_8
    configuration.save_pretrained(tmp_dir )
    shutil.move(os.path.join(tmp_dir , """config.4.0.0.json""" ) , os.path.join(tmp_dir , """config.42.0.0.json""" ) )
    new_configuration = AutoConfig.from_pretrained(tmp_dir )
    self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase__ ( self ):
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowerCamelCase : str = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
lowerCamelCase : Tuple = """v4.0.0"""
lowerCamelCase , lowerCamelCase : Optional[int] = new_transformers.models.auto.AutoConfig.from_pretrained(
__magic_name__ , return_unused_kwargs=__magic_name__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__magic_name__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase : Tuple = """v3.0.0"""
lowerCamelCase : Any = old_transformers.models.auto.AutoConfig.from_pretrained(__magic_name__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
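# Editor's note: `update_from_string`, tested above, parses comma-separated "key=value"
# pairs and casts each value to the type of the existing attribute -- a compact,
# self-contained illustration:
from transformers import GPT2Config

c = GPT2Config()
c.update_from_string("n_embd=100,resid_pdrop=0.2,scale_attn_weights=false")
assert c.n_embd == 100 and c.resid_pdrop == 0.2 and c.scale_attn_weights is False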
| 287 | 0 |
"""simple docstring"""
def valid_coloring(neighbours , colored_vertices , color ) ->bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color(graph , max_colors , colored_vertices , index ) ->bool:
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph , max_colors ) ->list[int]:
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
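# Editor's note: a quick demonstration of the backtracking colorer above on a 5-cycle,
# given as an adjacency matrix (1 = edge). An odd cycle needs three colors; `color`
# returns one valid assignment, or [] when none exists.
if __name__ == "__main__":
    cycle = [
        [0, 1, 0, 0, 1],
        [1, 0, 1, 0, 0],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [1, 0, 0, 1, 0],
    ]
    print(color(cycle , 3 ))  # e.g. [0, 1, 0, 1, 2]
    print(color(cycle , 2 ))  # [] -- an odd cycle is not 2-colorable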
| 203 | """simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(lowercase):
a__: Optional[Any] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Any = FlaxAutoModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(lowercase):
a__: str = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Any = FlaxAutoModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
a__: Dict = AutoTokenizer.from_pretrained(lowercase)
a__: Union[str, Any] = FlaxBertModel.from_pretrained(lowercase)
a__: List[str] = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX)
@jax.jit
def eval(**lowercase):
return model(**lowercase)
eval(**lowercase).block_until_ready()
@slow
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
a__: Optional[Any] = AutoTokenizer.from_pretrained(lowercase)
a__: Any = FlaxRobertaModel.from_pretrained(lowercase)
a__: int = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX)
@jax.jit
def eval(**lowercase):
return model(**lowercase)
eval(**lowercase).block_until_ready()
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , 'bert-base is not a local folder and is not a valid model identifier'):
a__: str = FlaxAutoModel.from_pretrained('bert-base')
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
a__: List[str] = FlaxAutoModel.from_pretrained(lowercase , revision='aaaaaa')
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
a__: List[str] = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model')
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(lowercase , 'Use `from_pt=True` to load this model'):
a__: List[str] = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
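# Editor's note: the jit pattern the tests above assert on, as a hedged standalone
# sketch (downloads a checkpoint; requires jax + flax). `block_until_ready()` forces
# JAX's asynchronous dispatch to finish before returning.
import jax
from transformers import AutoTokenizer, FlaxAutoModel

tok = AutoTokenizer.from_pretrained("bert-base-cased")
flax_model = FlaxAutoModel.from_pretrained("bert-base-cased")
batch = tok("Do you support jax jitted function?", return_tensors="jax")

@jax.jit
def forward(**kwargs):
    return flax_model(**kwargs)

forward(**batch).last_hidden_state.block_until_ready()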
| 203 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
UpperCamelCase__ = logging.get_logger(__name__)
@dataclass
class a__ ( snake_case__ ):
_a : str = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **_A ):
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__lowerCAmelCase = deprecated_arg[3:]
setattr(self , _A , not kwargs.pop(_A ) )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
__lowerCAmelCase = kwargs.pop("torchscript" , self.torchscript )
__lowerCAmelCase = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics )
__lowerCAmelCase = kwargs.pop("fp16_opt_level" , self.fpaa_opt_level )
super().__init__(**_A )
_a : bool = field(default=snake_case__ , metadata={"""help""": """Trace the models using torchscript"""} )
_a : bool = field(default=snake_case__ , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
_a : str = field(
default="""O1""" , metadata={
"""help""": (
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
"""See details at https://nvidia.github.io/apex/amp.html"""
)
} , )
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["torch"] )
logger.info("PyTorch: setting up devices" )
if not self.cuda:
__lowerCAmelCase = torch.device("cpu" )
__lowerCAmelCase = 0
elif is_torch_tpu_available():
__lowerCAmelCase = xm.xla_device()
__lowerCAmelCase = 0
else:
__lowerCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
__lowerCAmelCase = torch.cuda.device_count()
return device, n_gpu
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return is_torch_tpu_available() and self.tpu
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["torch"] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["torch"] )
return self._setup_devices[0]
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["torch"] )
return self._setup_devices[1]
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self.n_gpu > 0
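# Editor's note: the device-selection logic of `_setup_devices` above, distilled into a
# standalone sketch without the TPU branch -- a labeled simplification, not the
# library's exact code path.
import torch

def pick_device(use_cuda: bool = True):
    if use_cuda and torch.cuda.is_available():
        return torch.device("cuda"), torch.cuda.device_count()
    return torch.device("cpu"), 0

device, n_gpu = pick_device()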
| 92 |
import requests
giphy_api_key = '''YOUR API KEY'''
def get_gifs(query: str , api_key: str = giphy_api_key ) -> list:
    """Get a list of gif URLs matching the query from the Giphy search API."""
    formatted_query = '''+'''.join(query.split() )
    url = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url ).json()['''data''']
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 235 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example ):
    """Tokenize one example and record the characters-per-token ratio."""
    output = {}
    # NOTE: truncation=False matches the upstream pretokenization script; the original
    # line here passed a non-boolean value by mistake.
    output["input_ids"] = tokenizer(example["content"] , truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
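# Editor's note: the parallel tokenize-with-map pattern above in its smallest form; the
# model and dataset names here are illustrative stand-ins.
from datasets import load_dataset
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
small = load_dataset("imdb", split="train[:100]")
small = small.map(lambda ex: tok(ex["text"], truncation=True), num_proc=2,
                  remove_columns=["text"])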
| 365 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_5_0, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=SCREAMING_SNAKE_CASE , )
assert hasattr(self , "env" )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : str=1 ):
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str ):
TrainingJobAnalytics(SCREAMING_SNAKE_CASE ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
def snake_case ( self : str ):
# create estimator
lowercase__ : Optional[int] = self.create_estimator()
# run training
estimator.fit()
# result dataframe
lowercase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowercase__ : str = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowercase__ : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowercase__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , SCREAMING_SNAKE_CASE )
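# Editor's note: a minimal, framework-free illustration of the `parameterized_class`
# decorator used above -- each dict in the list becomes class attributes on one
# generated copy of the TestCase.
import unittest as _ut
from parameterized import parameterized_class

@parameterized_class([{"framework": "pytorch"}, {"framework": "tensorflow"}])
class FrameworkAttrTest(_ut.TestCase):
    def test_framework_is_set(self):
        self.assertIn(self.framework, ("pytorch", "tensorflow"))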
| 121 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCAmelCase_ : Union[str, Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
UpperCAmelCase_ : List[Any] = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
UpperCAmelCase_ : Any = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]="auto" , SCREAMING_SNAKE_CASE__ : Any=-1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.9 , SCREAMING_SNAKE_CASE__ : List[Any]=5 , SCREAMING_SNAKE_CASE__ : Tuple=5_0_0 , SCREAMING_SNAKE_CASE__ : Dict="gpt2-large" , SCREAMING_SNAKE_CASE__ : Optional[Any]=-1 , SCREAMING_SNAKE_CASE__ : List[str]=1_0_2_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_5 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Dict=2_5 , ) -> Dict:
a_ : Dict = compute_mauve(
p_text=SCREAMING_SNAKE_CASE__ , q_text=SCREAMING_SNAKE_CASE__ , p_features=SCREAMING_SNAKE_CASE__ , q_features=SCREAMING_SNAKE_CASE__ , p_tokens=SCREAMING_SNAKE_CASE__ , q_tokens=SCREAMING_SNAKE_CASE__ , num_buckets=SCREAMING_SNAKE_CASE__ , pca_max_data=SCREAMING_SNAKE_CASE__ , kmeans_explained_var=SCREAMING_SNAKE_CASE__ , kmeans_num_redo=SCREAMING_SNAKE_CASE__ , kmeans_max_iter=SCREAMING_SNAKE_CASE__ , featurize_model_name=SCREAMING_SNAKE_CASE__ , device_id=SCREAMING_SNAKE_CASE__ , max_text_length=SCREAMING_SNAKE_CASE__ , divergence_curve_discretization_size=SCREAMING_SNAKE_CASE__ , mauve_scaling_factor=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , seed=SCREAMING_SNAKE_CASE__ , )
return out
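# Editor's note: the call pattern from the metric's own docstring, repeated compactly;
# it needs the `mauve-text` package and downloads a featurizer model on first run.
import datasets as _ds

mauve_metric = _ds.load_metric("mauve")
result = mauve_metric.compute(predictions=["hello there"], references=["hello there"])
print(result.mauve)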
| 32 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ):
snake_case__ : Optional[Any] = TextToVideoSDPipeline
snake_case__ : Optional[int] = TEXT_TO_IMAGE_PARAMS
snake_case__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
snake_case__ : Optional[Any] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
torch.manual_seed(0 )
a_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , )
a_ : int = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
a_ : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
a_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
a_ : Dict = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a_ : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]=0 ) -> List[str]:
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
a_ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
a_ : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
a_ : int = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
a_ : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a_ : Dict = self.get_dummy_components()
a_ : str = TextToVideoSDPipeline(**SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
a_ : Dict = 'np'
a_ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE__ ).frames
a_ : int = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
a_ : Union[str, Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
return super().test_progress_bar()
@slow
@skip_mps
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
a_ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
a_ : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
a_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
a_ : Optional[Any] = pipe.to('cuda' )
a_ : Any = 'Spiderman is surfing'
a_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : Optional[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2_5 , output_type='pt' ).frames
a_ : str = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
a_ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
a_ : Tuple = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
a_ : Tuple = pipe.to('cuda' )
a_ : Any = 'Spiderman is surfing'
a_ : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
a_ : List[Any] = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='pt' ).frames
a_ : List[str] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
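# Editor's note: the slow-test inference path above as a hedged sketch -- the model id,
# scheduler swap and prompt come straight from the test, but running it needs a CUDA
# GPU and the checkpoint download.
import torch as _torch
from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
generator = _torch.Generator(device="cpu").manual_seed(0)
frames = pipe("Spiderman is surfing", generator=generator,
              num_inference_steps=25, output_type="pt").frames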
| 32 | 1 |
"""simple docstring"""
def solution() -> str:
    total = 0
    for i in range(1 , 10_01):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution()) | 361 |
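# Editor's note: the same Project Euler "self powers" result via modular exponentiation,
# so intermediate values never exceed ten digits. (A zfill would only matter if the
# last ten digits began with zero.)
def solution_mod() -> str:
    return str(sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10)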
"""simple docstring"""
def surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron with the given edge length."""
    if not isinstance(edge , (int, float)) or edge <= 0:
        raise ValueError('''Edge length must be positive.''')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def volume(edge: float) -> float:
    """Volume of a regular dodecahedron with the given edge length."""
    if not isinstance(edge , (int, float)) or edge <= 0:
        raise ValueError('''Edge length must be positive.''')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod() | 188 | 0 |
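# Editor's note: a quick numeric sanity check of the two formulas for a unit edge; the
# reference values are the standard closed forms for a regular dodecahedron.
print(f"surface_area(1) = {surface_area(1):.4f}")   # ~20.6457
print(f"volume(1)       = {volume(1):.4f}")         # ~7.6631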
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_UpperCAmelCase : str = logging.get_logger(__name__)
def A ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None ) -> Optional[Any]:
'''simple docstring'''
if "." in tensor_name:
UpperCamelCase = tensor_name.split('.' )
for split in splits[:-1]:
UpperCamelCase = getattr(lowercase , lowercase )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''' )
UpperCamelCase = tensor_name in module._buffers
UpperCamelCase = getattr(lowercase , lowercase )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' )
UpperCamelCase = False
UpperCamelCase = False
if is_buffer or not is_bitsandbytes_available():
UpperCamelCase = False
UpperCamelCase = False
else:
UpperCamelCase = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
UpperCamelCase = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
UpperCamelCase = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
UpperCamelCase = old_value.to(lowercase )
elif isinstance(lowercase , torch.Tensor ):
UpperCamelCase = value.to('cpu' )
if value.dtype == torch.inta:
UpperCamelCase = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
UpperCamelCase = torch.tensor(lowercase , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , lowercase ) and fpaa_statistics is None:
UpperCamelCase = new_value.T
UpperCamelCase = old_value.__dict__
if is_abit:
UpperCamelCase = bnb.nn.IntaParams(lowercase , requires_grad=lowercase , **lowercase ).to(lowercase )
elif is_abit:
UpperCamelCase = bnb.nn.Paramsabit(lowercase , requires_grad=lowercase , **lowercase ).to(lowercase )
UpperCamelCase = new_value
if fpaa_statistics is not None:
setattr(module.weight , 'SCB' , fpaa_statistics.to(lowercase ) )
else:
if value is None:
UpperCamelCase = old_value.to(lowercase )
elif isinstance(lowercase , torch.Tensor ):
UpperCamelCase = value.to(lowercase )
else:
UpperCamelCase = torch.tensor(lowercase , device=lowercase )
if is_buffer:
UpperCamelCase = new_value
else:
UpperCamelCase = nn.Parameter(lowercase , requires_grad=old_value.requires_grad )
UpperCamelCase = new_value
def A ( lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=False ) -> str:
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(lowercase )
if (isinstance(lowercase , nn.Linear ) or isinstance(lowercase , lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(lowercase , lowercase ):
UpperCamelCase , UpperCamelCase = module.weight.shape
else:
UpperCamelCase = module.in_features
UpperCamelCase = module.out_features
if quantization_config.quantization_method() == "llm_int8":
UpperCamelCase = bnb.nn.LinearabitLt(
lowercase , lowercase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
UpperCamelCase = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
UpperCamelCase = bnb.nn.Linearabit(
lowercase , lowercase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
UpperCamelCase = True
# Store the module class in case we need to transpose the weight later
UpperCamelCase = type(lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(lowercase )
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
lowercase , lowercase , lowercase , lowercase , has_been_replaced=lowercase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def A ( lowercase , lowercase=None , lowercase=None , lowercase=None ) -> List[str]:
'''simple docstring'''
UpperCamelCase = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
UpperCamelCase , UpperCamelCase = _replace_with_bnb_linear(
lowercase , lowercase , lowercase , lowercase )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def A ( *lowercase , **lowercase ) -> str:
'''simple docstring'''
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , lowercase , )
return replace_with_bnb_linear(*lowercase , **lowercase )
def A ( *lowercase , **lowercase ) -> Union[str, Any]:
'''simple docstring'''
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , lowercase , )
return set_module_quantized_tensor_to_device(*lowercase , **lowercase )
def A ( lowercase ) -> List[str]:
'''simple docstring'''
UpperCamelCase = deepcopy(lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
UpperCamelCase = find_tied_parameters(lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(lowercase , lowercase ):
UpperCamelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(lowercase , [] )
UpperCamelCase = len(lowercase ) > 0
# Check if it is a base model
UpperCamelCase = not hasattr(lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(lowercase ) - set(lowercase )
UpperCamelCase = list(set(lowercase ) ) + list(lowercase )
# remove ".weight" from the keys
UpperCamelCase = ['.weight', '.bias']
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(lowercase , '' )
filtered_module_names.append(lowercase )
return filtered_module_names
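# Editor's note: what the replacement pass above does for one module, shown directly --
# a hedged sketch, not the library's exact code path; real use goes through
# `from_pretrained(..., load_in_8bit=True)` rather than manual surgery.
import torch.nn as _nn
import bitsandbytes as _bnb

fp_linear = _nn.Linear(768, 768, bias=True)
int8_linear = _bnb.nn.Linear8bitLt(
    fp_linear.in_features, fp_linear.out_features, fp_linear.bias is not None,
    has_fp16_weights=False, threshold=6.0,
)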
| 222 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a , start , end ) -> int:
    """Sort a[start:end+1] in place; return the number of comparisons made."""
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition(a , start , end ) -> tuple:
    """Partition around a random pivot; return (pivot index, comparison count)."""
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 222 | 1 |
'''simple docstring'''
import math
def decimal_to_octal(num: int ) -> str:
    """Convert a non-negative decimal integer to its octal string form."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
        counter += 1
        num = math.floor(num / 8 )  # basically num //= 8 without remainder if any
    # The int() call removes the trailing '.0' that math.pow introduces in `octal`.
    return F'0o{int(octal )}'
def main():
'''simple docstring'''
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(216 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(512 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
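# Editor's note: Python's built-in `oct()` produces the same '0o'-prefixed strings, so
# it doubles as a correctness oracle for the manual conversion above.
for n in (2, 8, 65, 216, 512):
    assert decimal_to_octal(n) == oct(n), (n, oct(n))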
| 311 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n ):
    """Print the upper half of the diamond (pyramid)."""
    for i in range(0 , n ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(''' ''' , end='''''' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()
def reverse_floyd(n ):
    """Print the lower half of the diamond."""
    for i in range(n , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(''' ''' , end='''''' )
def pretty_print(n ):
    """Print the full diamond, or a message for non-positive n."""
    if n <= 0:
        print(''' ... .... nothing printing :(''' )
        return
    floyd(n )  # upper half
    reverse_floyd(n )  # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
K = 1
while K:
    user_number = int(input('enter the number and , and see the magic : '))
    print()
    pretty_print(user_number)
    K = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
| 311 | 1 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( snake_case_ , unittest.TestCase ):
__UpperCAmelCase : Tuple = PriorTransformer
__UpperCAmelCase : List[str] = '''hidden_states'''
@property
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = 4
snake_case : Any = 8
snake_case : Dict = 7
snake_case : Union[str, Any] = floats_tensor((batch_size, embedding_dim) ).to(UpperCamelCase__ )
snake_case : List[str] = floats_tensor((batch_size, embedding_dim) ).to(UpperCamelCase__ )
snake_case : str = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(UpperCamelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCamelCase ( self , UpperCamelCase__=0 ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(UpperCamelCase__ )
snake_case : Dict = 4
snake_case : Union[str, Any] = 8
snake_case : Dict = 7
snake_case : Tuple = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase__ )
snake_case : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase__ )
snake_case : Any = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCamelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
return (4, 8)
@property
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return (4, 8)
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
snake_case : Dict = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
model , loading_info = PriorTransformer.from_pretrained(
    "hf-internal-testing/prior-dummy" , output_loading_info=True )
self.assertIsNotNone(model )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(torch_device )
hidden_states = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case ,snake_case : int = self.prepare_init_args_and_inputs_for_common()
snake_case : Optional[Any] = self.model_class(**UpperCamelCase__ )
snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : Tuple = [*signature.parameters.keys()]
snake_case : Tuple = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2] , UpperCamelCase__ )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[Any] = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
snake_case : Dict = model.to(UpperCamelCase__ )
if hasattr(UpperCamelCase__ , "set_default_attn_processor" ):
model.set_default_attn_processor()
snake_case : Tuple = self.get_dummy_seed_input()
with torch.no_grad():
snake_case : Union[str, Any] = model(**UpperCamelCase__ )[0]
snake_case : Optional[Any] = output[0, :5].flatten().cpu()
print(UpperCamelCase__ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
snake_case : Dict = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )
@slow
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self , UpperCamelCase__=1 , UpperCamelCase__=768 , UpperCamelCase__=77 , UpperCamelCase__=0 ) -> str:
'''simple docstring'''
torch.manual_seed(UpperCamelCase__ )
snake_case : Optional[int] = batch_size
snake_case : int = embedding_dim
snake_case : Optional[Any] = num_embeddings
snake_case : str = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase__ )
snake_case : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(UpperCamelCase__ )
snake_case : str = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCamelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
snake_case : str = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
model.to(UpperCamelCase__ )
snake_case : List[Any] = self.get_dummy_seed_input(seed=UpperCamelCase__ )
with torch.no_grad():
snake_case : Optional[Any] = model(**UpperCamelCase__ )[0]
assert list(sample.shape ) == [1, 768]
snake_case : Dict = sample[0, :8].flatten().cpu()
print(UpperCamelCase__ )
snake_case : List[Any] = torch.tensor(UpperCamelCase__ )
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
| 203 |
"""simple docstring"""
def combination_util(arr , n , r , index , data , i ):
    """Recursively fill `data` and print every combination of size r."""
    if index == r:
        for j in range(r ):
            print(data[j] , end=" " )
        print(" " )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr , n , r , index + 1 , data , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr , n , r ):
    """Print all combinations of size r drawn from the first n elements of arr."""
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr , n , r , 0 , data , 0 )
if __name__ == "__main__":
# Driver code to check the function above
__snake_case = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
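# Editor's note: the standard-library equivalent -- `itertools.combinations` yields the
# same r-sized subsets in the same order as the recursive routine above, which makes it
# a handy cross-check.
from itertools import combinations
for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)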
| 203 | 1 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
    'tab': ord('\t'),
    'newline': ord('\r'),
    'esc': 27,
    'up': 65 + ARROW_KEY_FLAG,
    'down': 66 + ARROW_KEY_FLAG,
    'right': 67 + ARROW_KEY_FLAG,
    'left': 68 + ARROW_KEY_FLAG,
    'mod_int': 91,
    'undefined': sys.maxsize,
    'interrupt': 3,
    'insert': 50,
    'delete': 51,
    'pg_up': 53,
    'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        B'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
        B'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
        B'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
        B'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
        B'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
        B'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
        B'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
        B'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
    }
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Read raw characters from stdin, handling Windows and POSIX terminals."""
    if os.name == "nt":
        import msvcrt
        encoding = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP["""esc"""] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
    return ch
def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
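# Illustrative usage sketch (assumes an interactive TTY, so it is left as a comment):
#
#   key = get_character()
#   if ord(key) == KEYMAP["up"]:
#       ...  # move the cursor up
#
# Arrow keys come back with ARROW_KEY_FLAG (1 << 8) added to the code of the
# escape-sequence letter, which keeps them from colliding with ordinary
# one-byte characters.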
| 362 | """simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 126 | 0 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
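# Usage sketch (paths are hypothetical; python-fire maps the function
# signature onto the command line):
#
#   python minify.py ./train_data ./train_data_mini 100
#
# which writes the first 100 lines of every file in ./train_data into a file
# of the same name under ./train_data_mini.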
| 338 |
from __future__ import annotations
class Node:
    """A Node has a data variable and pointers to Nodes to its left and right."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    """Recursive function that returns the depth of a binary tree."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """Returns True if this is a full binary tree."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
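# For the sample tree built in main() above, the run should print:
#   False  (node 7 has a left child but no right child, so the tree is not full)
#   4      (the longest root-to-leaf paths, 1-2-5-6 and 1-3-7-8, have 4 nodes)
# followed by the in-order traversal 4 2 6 5 9 1 8 7 3, one value per line.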
| 121 | 0 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
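# Quick sanity note (illustrative only, not executed by the pipeline): the max
# subtraction above is the standard trick to keep np.exp from overflowing;
# e.g. softmax(np.array([[1.0, 2.0, 3.0]])) returns rows that sum to 1, with
# the largest logit receiving the largest probability.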
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated,  if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 371 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
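# Layout note: T5X stores these kernels with the layer axis in the middle,
# roughly (in_features, num_layers, out_features), so the [:, i, :] slices
# above select layer i; the caller then transposes with .T to obtain the
# (out_features, in_features) layout that torch.nn.Linear expects.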
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
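# `strict=True` makes load_state_dict raise if the converted key set does not
# exactly match the model's parameters, which acts as the safety net for all
# the renaming performed in convert_t5x_to_pytorch above.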
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 297 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
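# Illustrative round trip (assuming the hub checkpoint referenced in the map above):
#
#   tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   tok.save_vocabulary("./out")  # copies spiece.model, or re-serializes it from
#                                 # memory when the original file is unavailable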
| 7 |
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
if __name__ == "__main__":
print(solution())
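# Illustrative check on a tiny 4x4 grid (hypothetical values, not the Euler input):
#   largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
# returns 13 * 14 * 15 * 16 = 43680, the bottom-row horizontal window, after
# scanning every vertical, horizontal, and diagonal 4-cell window.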
| 188 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Generates the prime numbers in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(f'''{solution() = }''')
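# Why the loop steps by 6 starting at 5: every prime above 3 has the form
# 6k +/- 1, so each window only needs to probe i and i + 2. As a small check,
# solution(10) = 2 + 3 + 5 + 7 = 17.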
| 259 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
| 187 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    """Introduces a blocking point in the script, making sure all processes have reached this point before continuing."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()`."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """A context manager that adds each keyword argument to `os.environ` and removes it again on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Gets a pretty name from `obj`."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merges two dictionaries."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Checks if a port is in use on `localhost`."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
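# Illustrative use of patch_environment (keys are upper-cased on entry and
# removed again on exit):
#
#   with patch_environment(master_port="29501"):
#       ...  # os.environ["MASTER_PORT"] == "29501" only inside this block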
| 310 | 0 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number with binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """An O(m log n) solution that uses binary search to find the boundary between positive and negative numbers."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """This solution is O(n^2) because it iterates through every column and row."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Similar to the brute force solution above, but uses break to reduce the number of iterations."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark our functions next to each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
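# The binary-search variant wins because each row costs O(log m) instead of
# O(m), and the shrinking `bound` exploits the column-wise ordering as well.
# Small check: count_negatives_binary_search([[1, -1], [-1, -2]]) == 3.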
| 328 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for one split and save them to `{split}_results.json` in output_dir."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
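
    # The loop below copies any regularization overrides (layerdrop/dropout) supplied via the
    # training args onto the model config, asserting first that this architecture exposes them.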
UpperCamelCase_ = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
assert hasattr(UpperCamelCase_ , UpperCamelCase_ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(UpperCamelCase_ , UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path,
        config=config, cache_dir=model_args.cache_dir,
    )
    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args,
        train_dataset=train_dataset, eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn, tokenizer=tokenizer,
    )
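    # Seq2SeqDataCollator (from the local utils module) pads encoder/decoder inputs and derives
    # decoder_input_ids from the labels via decoder_start_token_id; when tpu_num_cores is set it
    # pads every batch to a fixed length so XLA sees static shapes.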
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs); the index argument is required by xla_spawn but unused here.
    main()


if __name__ == "__main__":
    main()
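

# Example invocation (illustrative only -- the checkpoint name, data dir, and output dir below are
# placeholders, not values taken from this script):
#
#   python finetune_trainer.py \
#       --model_name_or_path facebook/bart-base \
#       --data_dir ./cnn_dm \
#       --output_dir ./bart_cnn_finetuned \
#       --do_train --do_eval --predict_with_generate \
#       --overwrite_output_dir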
| 328 | 1 |