code
stringlengths 82
54.1k
| code_codestyle
int64 0
699
| style_context
stringlengths 111
35.6k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_UpperCAmelCase : int = '''\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'''
_UpperCAmelCase : Union[str, Any] = '''\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'''
_UpperCAmelCase : Tuple = '''\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def _A( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def _A( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def _A( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_="uniform_average" , snake_case_=True ):
lowercase =mean_squared_error(
_UpperCamelCase , _UpperCamelCase , sample_weight=_UpperCamelCase , multioutput=_UpperCamelCase , squared=_UpperCamelCase )
return {"mse": mse}
| 72 |
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :
def __init__( self : str , lowercase__ : Union[str, Any] , lowercase__ : str=13 , lowercase__ : List[str]=32 , lowercase__ : int=2 , lowercase__ : List[Any]=3 , lowercase__ : Union[str, Any]=16 , lowercase__ : List[Any]=[32, 64, 1_28] , lowercase__ : List[Any]=[1, 2, 1] , lowercase__ : int=[2, 2, 4] , lowercase__ : str=2 , lowercase__ : Union[str, Any]=2.0 , lowercase__ : Any=True , lowercase__ : List[Any]=0.0 , lowercase__ : List[str]=0.0 , lowercase__ : Union[str, Any]=0.1 , lowercase__ : Any="gelu" , lowercase__ : Dict=False , lowercase__ : Any=True , lowercase__ : List[str]=0.0_2 , lowercase__ : str=1e-5 , lowercase__ : List[Any]=True , lowercase__ : Tuple=None , lowercase__ : int=True , lowercase__ : Any=10 , lowercase__ : Optional[int]=8 , lowercase__ : int=["stage1", "stage2"] , lowercase__ : int=[1, 2] , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = embed_dim
_lowerCAmelCase = hidden_sizes
_lowerCAmelCase = depths
_lowerCAmelCase = num_heads
_lowerCAmelCase = window_size
_lowerCAmelCase = mlp_ratio
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = hidden_act
_lowerCAmelCase = use_absolute_embeddings
_lowerCAmelCase = patch_norm
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = initializer_range
_lowerCAmelCase = is_training
_lowerCAmelCase = scope
_lowerCAmelCase = use_labels
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = encoder_stride
_lowerCAmelCase = out_features
_lowerCAmelCase = out_indices
def SCREAMING_SNAKE_CASE__ ( self : int ):
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE__ ( self : str , lowercase__ : str , lowercase__ : Any , lowercase__ : Optional[Any] ):
_lowerCAmelCase = FocalNetModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowerCAmelCase = model(_UpperCamelCase )
_lowerCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , lowercase__ : str , lowercase__ : str , lowercase__ : List[Any] ):
_lowerCAmelCase = FocalNetBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowerCAmelCase = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase = None
_lowerCAmelCase = FocalNetBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowerCAmelCase = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : Any , lowercase__ : Any , lowercase__ : str ):
_lowerCAmelCase = FocalNetForMaskedImageModeling(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowerCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = FocalNetForMaskedImageModeling(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , lowercase__ : Dict , lowercase__ : List[str] , lowercase__ : Any ):
_lowerCAmelCase = self.type_sequence_label_size
_lowerCAmelCase = FocalNetForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowerCAmelCase = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = FocalNetForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( A__ ,A__ ,unittest.TestCase ):
UpperCamelCase__ =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase__ =(
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ =False
UpperCamelCase__ =False
UpperCamelCase__ =False
UpperCamelCase__ =False
UpperCamelCase__ =False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
_lowerCAmelCase = FocalNetModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , embed_dim=37 , has_text_modality=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return
def SCREAMING_SNAKE_CASE__ ( self : str ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase = model_class(_UpperCamelCase )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , lowercase__ : Optional[Any] , lowercase__ : Tuple , lowercase__ : List[str] , lowercase__ : Tuple ):
_lowerCAmelCase = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
_lowerCAmelCase = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# FocalNet has a different seq_length
_lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = reshaped_hidden_states[0].shape
_lowerCAmelCase = (
reshaped_hidden_states[0].view(_UpperCamelCase , _UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE__ ( self : int ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = 3
_lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
self.check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = FocalNetModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(config=_UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : str ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
_lowerCAmelCase = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCamelCase )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase = image_processor(images=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(**_UpperCamelCase )
# verify the logits
_lowerCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
_lowerCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class lowerCamelCase__ ( A__ ,unittest.TestCase ):
UpperCamelCase__ =(FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase__ =FocalNetConfig
UpperCamelCase__ =False
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
_lowerCAmelCase = FocalNetModelTester(self )
| 192 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Dict = """falcon"""
__A : Any = ["""past_key_values"""]
def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase )
_UpperCAmelCase = hidden_size if n_embed is None else n_embed
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase = alibi
_UpperCAmelCase = new_decoder_architecture
_UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase = parallel_attn
_UpperCAmelCase = bias
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase( self ):
return not self.alibi | 32 | 0 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase ( A__ ):
'''simple docstring'''
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
with open(_UpperCamelCase , encoding='utf-8' ) as input_file:
lowerCamelCase_ = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
lowerCamelCase_ = input_file.read()
lowerCamelCase_ = regexp.search(_UpperCamelCase )
return match
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
with open(_UpperCamelCase , encoding='utf-8' ) as input_file:
lowerCamelCase_ = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
lowerCamelCase_ = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
lowerCamelCase_ = regexp.finditer(_UpperCamelCase )
lowerCamelCase_ = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = Path('./datasets' )
lowerCamelCase_ = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_UpperCamelCase ) ):
raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = Path('./datasets' )
lowerCamelCase_ = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_print_statements(str(_UpperCamelCase ) ):
raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 42 |
from math import sqrt
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
while count != nth:
number += 2
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
snake_case : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case_ (A__ ):
def __init__( self :List[Any] ,__snake_case :Optional[Any] ,__snake_case :Optional[int]=7_68 ) -> str:
super().__init__(_UpperCamelCase )
a__ = proj_size
a__ = CLIPVisionModel(_UpperCamelCase )
a__ = PaintByExampleMapper(_UpperCamelCase )
a__ = nn.LayerNorm(config.hidden_size )
a__ = nn.Linear(config.hidden_size ,self.proj_size )
# uncondition for scaling
a__ = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def lowerCamelCase__( self :int ,__snake_case :Dict ,__snake_case :Dict=False ) -> Any:
a__ = self.model(pixel_values=_UpperCamelCase )
a__ = clip_output.pooler_output
a__ = self.mapper(latent_states[:, None] )
a__ = self.final_layer_norm(_UpperCamelCase )
a__ = self.proj_out(_UpperCamelCase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class snake_case_ (nn.Module ):
def __init__( self :Tuple ,__snake_case :str ) -> Optional[Any]:
super().__init__()
a__ = (config.num_hidden_layers + 1) // 5
a__ = config.hidden_size
a__ = 1
a__ = nn.ModuleList(
[
BasicTransformerBlock(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,activation_fn='gelu' ,attention_bias=_UpperCamelCase )
for _ in range(_UpperCamelCase )
] )
def lowerCamelCase__( self :Dict ,__snake_case :Optional[int] ) -> Optional[int]:
for block in self.blocks:
a__ = block(_UpperCamelCase )
return hidden_states
| 335 |
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE_ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__: Dict = logging.get_logger(__name__)
lowerCAmelCase__: Dict = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class snake_case_ ( A__ ):
__lowerCamelCase : Union[str, Any] = """wavlm"""
def __init__( self , __lowerCAmelCase=32 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3_072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1e-5 , __lowerCAmelCase="group" , __lowerCAmelCase="gelu" , __lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase=False , __lowerCAmelCase=128 , __lowerCAmelCase=16 , __lowerCAmelCase=320 , __lowerCAmelCase=800 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0.05 , __lowerCAmelCase=10 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0 , __lowerCAmelCase=10 , __lowerCAmelCase=320 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 , __lowerCAmelCase=100 , __lowerCAmelCase=256 , __lowerCAmelCase=256 , __lowerCAmelCase=0.1 , __lowerCAmelCase="mean" , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=256 , __lowerCAmelCase=(512, 512, 512, 512, 1_500) , __lowerCAmelCase=(5, 3, 3, 1, 1) , __lowerCAmelCase=(1, 2, 3, 1, 1) , __lowerCAmelCase=512 , __lowerCAmelCase=80 , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=False , __lowerCAmelCase=3 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=None , **__lowerCAmelCase , ):
super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : List[str] = hidden_size
SCREAMING_SNAKE_CASE_ : Dict = feat_extract_norm
SCREAMING_SNAKE_CASE_ : List[str] = feat_extract_activation
SCREAMING_SNAKE_CASE_ : Optional[int] = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Tuple = conv_bias
SCREAMING_SNAKE_CASE_ : Dict = num_buckets
SCREAMING_SNAKE_CASE_ : Any = max_bucket_distance
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE_ : Tuple = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE_ : Union[str, Any] = len(self.conv_dim )
SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_act
SCREAMING_SNAKE_CASE_ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_dropout
SCREAMING_SNAKE_CASE_ : List[str] = attention_dropout
SCREAMING_SNAKE_CASE_ : List[str] = activation_dropout
SCREAMING_SNAKE_CASE_ : Tuple = feat_proj_dropout
SCREAMING_SNAKE_CASE_ : Dict = final_dropout
SCREAMING_SNAKE_CASE_ : Optional[Any] = layerdrop
SCREAMING_SNAKE_CASE_ : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : List[str] = num_ctc_classes
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE_ : str = do_stable_layer_norm
SCREAMING_SNAKE_CASE_ : Tuple = use_weighted_layer_sum
SCREAMING_SNAKE_CASE_ : Union[str, Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE_ : str = apply_spec_augment
SCREAMING_SNAKE_CASE_ : List[str] = mask_time_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = mask_time_length
SCREAMING_SNAKE_CASE_ : Any = mask_time_min_masks
SCREAMING_SNAKE_CASE_ : Any = mask_feature_prob
SCREAMING_SNAKE_CASE_ : Dict = mask_feature_length
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE_ : Any = num_codevectors_per_group
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_codevector_groups
SCREAMING_SNAKE_CASE_ : Optional[int] = contrastive_logits_temperature
SCREAMING_SNAKE_CASE_ : Dict = num_negatives
SCREAMING_SNAKE_CASE_ : Dict = codevector_dim
SCREAMING_SNAKE_CASE_ : Dict = proj_codevector_dim
SCREAMING_SNAKE_CASE_ : List[Any] = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE_ : Any = ctc_loss_reduction
SCREAMING_SNAKE_CASE_ : Dict = ctc_zero_infinity
# adapter
SCREAMING_SNAKE_CASE_ : Optional[int] = add_adapter
SCREAMING_SNAKE_CASE_ : str = adapter_kernel_size
SCREAMING_SNAKE_CASE_ : List[str] = adapter_stride
SCREAMING_SNAKE_CASE_ : List[str] = num_adapter_layers
SCREAMING_SNAKE_CASE_ : Dict = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ : Tuple = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE_ : List[Any] = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : str = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = xvector_output_dim
@property
def __A ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 345 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class __UpperCamelCase ( A__ ):
__A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__A : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
__A : ClassVar[Features] = Features({} )
__A : str = "text"
@property
def UpperCamelCase( self ):
return {self.text_column: "text"} | 32 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : List[str] = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 436 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "spiece.model"}
UpperCAmelCase_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class __UpperCamelCase ( A__ ):
__A : Any = VOCAB_FILES_NAMES
__A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
_UpperCAmelCase = legacy
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = extra_ids
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
@property
def UpperCamelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self ):
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase( self ):
return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
def UpperCamelCase( self , _UpperCamelCase ):
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' )
return super().tokenize(_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
if not self.legacy:
_UpperCAmelCase = text.startswith(_UpperCamelCase )
if is_first:
_UpperCAmelCase = text[1:]
_UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ):
_UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def UpperCamelCase( self , _UpperCamelCase ):
if token.startswith('''<extra_id_''' ):
_UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase )
_UpperCAmelCase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
if index < self.sp_model.get_piece_size():
_UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase )
else:
_UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
_UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(_UpperCamelCase )
_UpperCAmelCase = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,) | 32 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCAmelCase ( A , A , A ):
'''simple docstring'''
UpperCAmelCase__ =MobileBertConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(F"""Building PyTorch model from configuration: {config}""" )
UpperCAmelCase__ =MobileBertForPreTraining(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
UpperCAmelCase__ =load_tf_weights_in_mobilebert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCamelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 625 |
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set('''123456789''' )
def A__ ( ) -> int | None:
"""simple docstring"""
for base_num in range(99_99 , 49_99 , -1 ):
_UpperCAmelCase = 10_00_02 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
_UpperCAmelCase = 1_00_20_03 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : int = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class __lowerCAmelCase (A__ ):
'''simple docstring'''
a__ = """luke"""
def __init__( self , a=5_02_67 , a=50_00_00 , a=7_68 , a=2_56 , a=12 , a=12 , a=30_72 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=2 , a=0.02 , a=1e-1_2 , a=True , a=None , a=1 , a=0 , a=2 , **a , ):
"""simple docstring"""
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
snake_case_ :Optional[Any] = vocab_size
snake_case_ :Optional[Any] = entity_vocab_size
snake_case_ :List[str] = hidden_size
snake_case_ :List[Any] = entity_emb_size
snake_case_ :Tuple = num_hidden_layers
snake_case_ :str = num_attention_heads
snake_case_ :Dict = hidden_act
snake_case_ :Tuple = intermediate_size
snake_case_ :int = hidden_dropout_prob
snake_case_ :List[str] = attention_probs_dropout_prob
snake_case_ :int = max_position_embeddings
snake_case_ :Optional[int] = type_vocab_size
snake_case_ :Dict = initializer_range
snake_case_ :Any = layer_norm_eps
snake_case_ :Optional[Any] = use_entity_aware_attention
snake_case_ :Optional[Any] = classifier_dropout
| 584 |
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float ) -> np.ndarray:
"""simple docstring"""
return np.where(vector > 0 , SCREAMING_SNAKE_CASE_ , (alpha * (np.exp(SCREAMING_SNAKE_CASE_ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
from pathlib import Path
import numpy as np
from PIL import Image
def __UpperCamelCase ( _lowerCAmelCase ) -> np.ndarray:
"""simple docstring"""
A , A , A : List[Any] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def __UpperCamelCase ( _lowerCAmelCase ) -> np.ndarray:
"""simple docstring"""
return (gray > 127) & (gray <= 255)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> np.ndarray:
"""simple docstring"""
A : Optional[int] = np.zeros_like(SCREAMING_SNAKE_CASE_ )
A : Dict = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
A : Tuple = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
A : Optional[int] = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
A : Tuple = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE_:Union[str, Any] = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
SCREAMING_SNAKE_CASE_:List[Any] = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE_:List[str] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE_:Any = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE_:Union[str, Any] = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
| 662 |
UpperCAmelCase_ = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
UpperCAmelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def A__ ( ) -> None:
"""simple docstring"""
_UpperCAmelCase = '''Morse code here!'''
print(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = encrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main() | 32 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
UpperCAmelCase = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
UpperCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ).convert('''RGB''' )
return image
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = val
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
UpperCAmelCase = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
UpperCAmelCase = torch.cat((q_bias, torch.zeros_like(SCREAMING_SNAKE_CASE_ , requires_grad=SCREAMING_SNAKE_CASE_ ), v_bias) )
UpperCAmelCase = qkv_bias
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> int:
'''simple docstring'''
UpperCAmelCase = 364 if '''coco''' in model_name else 224
UpperCAmelCase = InstructBlipVisionConfig(image_size=SCREAMING_SNAKE_CASE_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
UpperCAmelCase = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
UpperCAmelCase = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_2001 ).to_dict()
elif "vicuna-13b" in model_name:
UpperCAmelCase = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_2001 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
UpperCAmelCase = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
UpperCAmelCase = InstructBlipConfig(vision_config=SCREAMING_SNAKE_CASE_ , text_config=SCREAMING_SNAKE_CASE_ , qformer_config=SCREAMING_SNAKE_CASE_ )
return config, image_size
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=False ) -> int:
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
UpperCAmelCase = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
UpperCAmelCase = LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
UpperCAmelCase , UpperCAmelCase = get_blipa_config(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = InstructBlipForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase = {
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
UpperCAmelCase , UpperCAmelCase = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = load_model_and_preprocess(
name=SCREAMING_SNAKE_CASE_ , model_type=SCREAMING_SNAKE_CASE_ , is_eval=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase = original_model.state_dict()
UpperCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE_ )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase = key.replace('''self''' , '''attention''' )
if "llm_proj" in key:
UpperCAmelCase = key.replace('''llm_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''llm_model''' ):
UpperCAmelCase = key.replace('''llm_model''' , '''language_model''' )
if key.startswith('''t5''' ):
UpperCAmelCase = key.replace('''t5''' , '''language''' )
UpperCAmelCase = val
# read in qv biases
read_in_q_v_bias(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = load_demo_image()
UpperCAmelCase = '''What is unusual about this image?'''
# create processor
UpperCAmelCase = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = InstructBlipProcessor(
image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ , )
UpperCAmelCase = processor(images=SCREAMING_SNAKE_CASE_ , text=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# make sure processor creates exact same pixel values
UpperCAmelCase = vis_processors['''eval'''](SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , SCREAMING_SNAKE_CASE_ )
original_model.to(SCREAMING_SNAKE_CASE_ )
hf_model.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
if "vicuna" in model_name:
UpperCAmelCase = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
UpperCAmelCase = hf_model(**SCREAMING_SNAKE_CASE_ ).logits
else:
UpperCAmelCase = original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
UpperCAmelCase = hf_model(**SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ).logits
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
UpperCAmelCase = 1E-4 if '''vicuna''' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ )
print('''Looks ok!''' )
print('''Generating with original model...''' )
UpperCAmelCase = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
UpperCAmelCase = hf_model.generate(
**SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
UpperCAmelCase = 2
print('''Original generation:''' , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = processor.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = [text.strip() for text in output_text]
print('''HF generation:''' , SCREAMING_SNAKE_CASE_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
processor.push_to_hub(F"""Salesforce/{model_name}""" )
hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
__A : List[Any] = argparse.ArgumentParser()
__A : Dict = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
__A : Dict = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 130 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Any = DanceDiffusionPipeline
__A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__A : Tuple = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__A : List[str] = False
__A : str = False
def UpperCamelCase( self ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
_UpperCAmelCase = IPNDMScheduler()
_UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ):
if str(_UpperCamelCase ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_UpperCAmelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = pipe(**_UpperCamelCase )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_local()
@skip_mps
def UpperCamelCase( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase( self ):
return super().test_attention_slicing_forward_pass()
def UpperCamelCase( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 | 32 | 0 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 341 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModel)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
) | 32 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : Union[str, Any] = {
'''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''],
'''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = ['''BertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
'''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BertForMaskedLM''',
'''BertForMultipleChoice''',
'''BertForNextSentencePrediction''',
'''BertForPreTraining''',
'''BertForQuestionAnswering''',
'''BertForSequenceClassification''',
'''BertForTokenClassification''',
'''BertLayer''',
'''BertLMHeadModel''',
'''BertModel''',
'''BertPreTrainedModel''',
'''load_tf_weights_in_bert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = [
'''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBertEmbeddings''',
'''TFBertForMaskedLM''',
'''TFBertForMultipleChoice''',
'''TFBertForNextSentencePrediction''',
'''TFBertForPreTraining''',
'''TFBertForQuestionAnswering''',
'''TFBertForSequenceClassification''',
'''TFBertForTokenClassification''',
'''TFBertLMHeadModel''',
'''TFBertMainLayer''',
'''TFBertModel''',
'''TFBertPreTrainedModel''',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = ['''TFBertTokenizer''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = [
'''FlaxBertForCausalLM''',
'''FlaxBertForMaskedLM''',
'''FlaxBertForMultipleChoice''',
'''FlaxBertForNextSentencePrediction''',
'''FlaxBertForPreTraining''',
'''FlaxBertForQuestionAnswering''',
'''FlaxBertForSequenceClassification''',
'''FlaxBertForTokenClassification''',
'''FlaxBertModel''',
'''FlaxBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
import baseaa
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode('''utf-8''' ) )
def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> str:
"""simple docstring"""
return baseaa.baadecode(SCREAMING_SNAKE_CASE_ ).decode('''utf-8''' )
if __name__ == "__main__":
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = baseaa_encode(test)
print(encoded)
UpperCAmelCase_ = baseaa_decode(encoded)
print(decoded) | 32 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
_lowercase: Any = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
_lowercase: Optional[int] = logging.get_logger(__name__)
class lowerCamelCase__ ( A__ ):
UpperCamelCase__ ="""maskformer"""
UpperCamelCase__ ={"""hidden_size""": """mask_feature_size"""}
UpperCamelCase__ =["""resnet""", """swin"""]
UpperCamelCase__ =["""detr"""]
def __init__( self : List[str] , lowercase__ : Optional[int] = 2_56 , lowercase__ : List[str] = 2_56 , lowercase__ : Any = 0.1 , lowercase__ : Dict = False , lowercase__ : str = None , lowercase__ : Any = None , lowercase__ : Union[str, Any] = 0.0_2 , lowercase__ : Tuple = 1.0 , lowercase__ : List[str] = 1.0 , lowercase__ : Tuple = 1.0 , lowercase__ : Tuple = 2_0.0 , lowercase__ : Dict = None , **lowercase__ : Optional[int] , ):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_lowerCAmelCase = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
_lowerCAmelCase = backbone_config.pop('model_type' )
_lowerCAmelCase = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase = config_class.from_dict(_UpperCamelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
f'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_lowerCAmelCase = DetrConfig()
else:
# verify that the decoder is supported
_lowerCAmelCase = (
decoder_config.pop('model_type' ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'Transformer Decoder {decoder_type} not supported, please use one of'
f' {",".join(self.decoders_supported )}' )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
_lowerCAmelCase = CONFIG_MAPPING[decoder_type]
_lowerCAmelCase = config_class.from_dict(_UpperCamelCase )
_lowerCAmelCase = backbone_config
_lowerCAmelCase = decoder_config
# main feature dimension for the model
_lowerCAmelCase = fpn_feature_size
_lowerCAmelCase = mask_feature_size
# initializer
_lowerCAmelCase = init_std
_lowerCAmelCase = init_xavier_std
# Hungarian matcher && loss
_lowerCAmelCase = cross_entropy_weight
_lowerCAmelCase = dice_weight
_lowerCAmelCase = mask_weight
_lowerCAmelCase = use_auxiliary_loss
_lowerCAmelCase = no_object_weight
_lowerCAmelCase = output_auxiliary_logits
_lowerCAmelCase = self.decoder_config.encoder_attention_heads
_lowerCAmelCase = self.decoder_config.num_hidden_layers
super().__init__(**_UpperCamelCase )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , lowercase__ : List[str] , lowercase__ : Union[str, Any] , **lowercase__ : List[Any] ):
return cls(
backbone_config=_UpperCamelCase , decoder_config=_UpperCamelCase , **_UpperCamelCase , )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase = copy.deepcopy(self.__dict__ )
_lowerCAmelCase = self.backbone_config.to_dict()
_lowerCAmelCase = self.decoder_config.to_dict()
_lowerCAmelCase = self.__class__.model_type
return output
| 192 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
__A : int = ["""pixel_values"""]
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
super().__init__(**_UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = do_rescale
_UpperCAmelCase = do_normalize
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if not is_batched(_UpperCamelCase ):
_UpperCAmelCase = [images]
if not valid_images(_UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
lowerCamelCase_ = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
lowerCamelCase_ = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
lowerCamelCase_ = max(len(SCREAMING_SNAKE_CASE_ ) ,len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) ,b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=A__ ):
__A : str = ["""torch""", """scipy"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] ) | 32 | 0 |
def __lowercase ( __lowerCAmelCase : Union[str, Any] ):
if not head:
return True
# split the list to two parts
a__ , a__ = head.next, head
while fast and fast.next:
a__ = fast.next.next
a__ = slow.next
a__ = slow.next
a__ = None # Don't forget here! But forget still works!
# reverse the second part
a__ = None
while second:
a__ = second.next
a__ = node
a__ = second
a__ = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
a__ = node.next
a__ = head.next
return True
def __lowercase ( __lowerCAmelCase : Union[str, Any] ):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
a__ = a__ = a__ = head
while fast and fast.next:
a__ , a__ = fast.next.next, slow.next
# 2. Push the second half into the stack
a__ = [slow.val]
while slow.next:
a__ = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
a__ = cur.next
return True
def __lowercase ( __lowerCAmelCase : Tuple ):
if not head or not head.next:
return True
a__ = {}
a__ = 0
while head:
if head.val in d:
d[head.val].append(SCREAMING_SNAKE_CASE_ )
else:
a__ = [pos]
a__ = head.next
pos += 1
a__ = pos - 1
a__ = 0
for v in d.values():
if len(SCREAMING_SNAKE_CASE_ ) % 2 != 0:
middle += 1
else:
a__ = 0
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
if v[i] + v[len(SCREAMING_SNAKE_CASE_ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 335 |
def A__ ( SCREAMING_SNAKE_CASE_ : int = 2_00_00_00 ) -> int:
"""simple docstring"""
_UpperCAmelCase = [0 for i in range(n + 1 )]
_UpperCAmelCase = 1
_UpperCAmelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = 1
_UpperCAmelCase = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case_ :
@staticmethod
def __A ( *__lowerCAmelCase , **__lowerCAmelCase ):
pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case_ ( unittest.TestCase ):
__lowerCamelCase : Union[str, Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
SCREAMING_SNAKE_CASE_ : List[str] = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def __A ( self , __lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = object_detector(examples[0] , threshold=0.0 )
SCREAMING_SNAKE_CASE_ : List[str] = len(_UpperCamelCase )
self.assertGreater(_UpperCamelCase , 0 )
self.assertEqual(
_UpperCamelCase , [
{
'score': ANY(_UpperCamelCase ),
'label': ANY(_UpperCamelCase ),
'box': {'xmin': ANY(_UpperCamelCase ), 'ymin': ANY(_UpperCamelCase ), 'xmax': ANY(_UpperCamelCase ), 'ymax': ANY(_UpperCamelCase )},
}
for i in range(_UpperCamelCase )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __A ( self ):
pass
@require_torch
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] , )
SCREAMING_SNAKE_CASE_ : str = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] , )
@require_torch
@slow
def __A ( self ):
SCREAMING_SNAKE_CASE_ : str = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE_ : Any = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __A ( self ):
pass
@require_torch
@slow
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.2
SCREAMING_SNAKE_CASE_ : str = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE_ : str = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=_UpperCamelCase , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] , )
@require_torch
@slow
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = pipeline('zero-shot-object-detection' )
SCREAMING_SNAKE_CASE_ : int = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=_UpperCamelCase , )
self.assertEqual(
nested_simplify(_UpperCamelCase , decimals=4 ) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] , )
| 345 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
_SCREAMING_SNAKE_CASE : str = get_logger(__name__)
class _snake_case ( enum.Enum ):
'''simple docstring'''
__snake_case = """all_checks"""
__snake_case = """basic_checks"""
__snake_case = """no_checks"""
class _snake_case ( A__ ):
'''simple docstring'''
pass
class _snake_case ( A__ ):
'''simple docstring'''
pass
class _snake_case ( A__ ):
'''simple docstring'''
pass
class _snake_case ( A__ ):
'''simple docstring'''
pass
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ):
"""simple docstring"""
if expected_checksums is None:
logger.info("Unable to verify checksums." )
return
if len(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) )
if len(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) )
__magic_name__ : int = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
__magic_name__ : str = " for " + verification_name if verification_name is not None else ""
if len(SCREAMING_SNAKE_CASE_ ) > 0:
raise NonMatchingChecksumError(
F"""Checksums didn\'t match{for_verification_name}:\n"""
F"""{bad_urls}\n"""
"Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error" )
logger.info("All the checksums matched successfully" + for_verification_name )
class _snake_case ( A__ ):
'''simple docstring'''
pass
class _snake_case ( A__ ):
'''simple docstring'''
pass
class _snake_case ( A__ ):
'''simple docstring'''
pass
class _snake_case ( A__ ):
'''simple docstring'''
pass
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if expected_splits is None:
logger.info("Unable to verify splits sizes." )
return
if len(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) > 0:
raise ExpectedMoreSplits(str(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) )
if len(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) > 0:
raise UnexpectedSplits(str(set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ ) ) )
__magic_name__ : int = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
raise NonMatchingSplitsSizesError(str(SCREAMING_SNAKE_CASE_ ) )
logger.info("All the splits matched successfully." )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ = True ):
"""simple docstring"""
if record_checksum:
__magic_name__ : int = shaaaa()
with open(SCREAMING_SNAKE_CASE_ , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"" ):
m.update(SCREAMING_SNAKE_CASE_ )
__magic_name__ : Any = m.hexdigest()
else:
__magic_name__ : Any = None
return {"num_bytes": os.path.getsize(SCREAMING_SNAKE_CASE_ ), "checksum": checksum}
def _UpperCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False | 436 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( A__ ):
__A : Dict = ["""image_processor""", """tokenizer"""]
__A : List[str] = """BridgeTowerImageProcessor"""
__A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , _UpperCamelCase , _UpperCamelCase ):
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel_values + pixel_mask
_UpperCAmelCase = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 32 | 0 |
import logging
from transformers import PretrainedConfig
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class snake_case_ ( A__ ):
'''simple docstring'''
__UpperCamelCase = """bertabs"""
def __init__( self, A_=3_0522, A_=512, A_=6, A_=512, A_=8, A_=512, A_=0.2, A_=6, A_=768, A_=8, A_=2048, A_=0.2, **A_, ) -> List[Any]:
super().__init__(**_UpperCamelCase )
UpperCAmelCase__ =vocab_size
UpperCAmelCase__ =max_pos
UpperCAmelCase__ =enc_layers
UpperCAmelCase__ =enc_hidden_size
UpperCAmelCase__ =enc_heads
UpperCAmelCase__ =enc_ff_size
UpperCAmelCase__ =enc_dropout
UpperCAmelCase__ =dec_layers
UpperCAmelCase__ =dec_hidden_size
UpperCAmelCase__ =dec_heads
UpperCAmelCase__ =dec_ff_size
UpperCAmelCase__ =dec_dropout
| 625 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , a , a=2 , a=True , a=False , a=10 , a=3 , a=32 * 4 , a=32 * 6 , a=4 , a=32 , ):
"""simple docstring"""
snake_case_ :Optional[Any] = parent
snake_case_ :Union[str, Any] = batch_size
snake_case_ :Dict = is_training
snake_case_ :List[Any] = use_auxiliary_loss
snake_case_ :Tuple = num_queries
snake_case_ :List[str] = num_channels
snake_case_ :List[str] = min_size
snake_case_ :Dict = max_size
snake_case_ :Optional[Any] = num_labels
snake_case_ :Optional[Any] = mask_feature_size
def _a ( self ):
"""simple docstring"""
snake_case_ :int = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCamelCase )
snake_case_ :Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCamelCase )
snake_case_ :Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCamelCase ) > 0.5
).float()
snake_case_ :Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCamelCase ) > 0.5).long()
snake_case_ :Optional[int] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _a ( self ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _a ( self ):
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ :int = self.prepare_config_and_inputs()
snake_case_ :int = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _a ( self , a , a ):
"""simple docstring"""
snake_case_ :Union[str, Any] = output.encoder_hidden_states
snake_case_ :Union[str, Any] = output.pixel_decoder_hidden_states
snake_case_ :str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCamelCase ) , config.decoder_config.decoder_layers )
def _a ( self , a , a , a , a=False ):
"""simple docstring"""
with torch.no_grad():
snake_case_ :str = MaskFormerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ :int = model(pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase )
snake_case_ :Optional[Any] = model(_UpperCamelCase , output_hidden_states=_UpperCamelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCamelCase , _UpperCamelCase )
def _a ( self , a , a , a , a , a ):
"""simple docstring"""
snake_case_ :Any = MaskFormerForInstanceSegmentation(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
def comm_check_on_output(a ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case_ :List[str] = model(pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase )
snake_case_ :int = model(_UpperCamelCase )
comm_check_on_output(_UpperCamelCase )
snake_case_ :Dict = model(
pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase )
comm_check_on_output(_UpperCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowerCAmelCase (A__ ,A__ ,unittest.TestCase ):
'''simple docstring'''
a__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
a__ = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def _a ( self ):
"""simple docstring"""
snake_case_ :List[str] = MaskFormerModelTester(self )
snake_case_ :Optional[int] = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )
def _a ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self ):
"""simple docstring"""
snake_case_ , snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase )
def _a ( self ):
"""simple docstring"""
snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_UpperCamelCase )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def _a ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def _a ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def _a ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def _a ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
def _a ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _a ( self ):
"""simple docstring"""
pass
def _a ( self ):
"""simple docstring"""
snake_case_ , snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :List[str] = model_class(_UpperCamelCase )
snake_case_ :List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ :str = [*signature.parameters.keys()]
snake_case_ :Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
@slow
def _a ( self ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
snake_case_ :List[str] = MaskFormerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def _a ( self ):
"""simple docstring"""
snake_case_ :Any = (self.model_tester.min_size,) * 2
snake_case_ :str = {
"pixel_values": torch.randn((2, 3, *size) , device=_UpperCamelCase ),
"mask_labels": torch.randn((2, 10, *size) , device=_UpperCamelCase ),
"class_labels": torch.zeros(2 , 10 , device=_UpperCamelCase ).long(),
}
snake_case_ :Optional[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_UpperCamelCase )
snake_case_ :str = model(**_UpperCamelCase )
self.assertTrue(outputs.loss is not None )
def _a ( self ):
"""simple docstring"""
snake_case_ , snake_case_ :int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase )
def _a ( self ):
"""simple docstring"""
snake_case_ , snake_case_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :str = model_class(_UpperCamelCase ).to(_UpperCamelCase )
snake_case_ :int = model(**_UpperCamelCase , output_attentions=_UpperCamelCase )
self.assertTrue(outputs.attentions is not None )
def _a ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
snake_case_ :Any = self.all_model_classes[1]
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ :int = self.model_tester.prepare_config_and_inputs()
snake_case_ :Dict = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
snake_case_ :Optional[int] = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase ).loss
loss.backward()
def _a ( self ):
"""simple docstring"""
snake_case_ :Optional[Any] = self.all_model_classes[1]
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ :str = self.model_tester.prepare_config_and_inputs()
snake_case_ :str = True
snake_case_ :int = True
snake_case_ :List[Any] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
snake_case_ :Optional[Any] = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase )
snake_case_ :str = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case_ :List[str] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
snake_case_ :Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case_ :Any = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__UpperCAmelCase : Dict = 1E-4
def A ( ):
"""simple docstring"""
snake_case_ :Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a ( self ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def _a ( self ):
"""simple docstring"""
snake_case_ :Dict = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(_UpperCamelCase )
snake_case_ :List[str] = self.default_image_processor
snake_case_ :Any = prepare_img()
snake_case_ :Any = image_processor(_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
snake_case_ :List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
snake_case_ :str = model(**_UpperCamelCase )
snake_case_ :Tuple = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
snake_case_ :str = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
snake_case_ :str = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_UpperCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def _a ( self ):
"""simple docstring"""
snake_case_ :Any = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(_UpperCamelCase )
.eval()
)
snake_case_ :int = self.default_image_processor
snake_case_ :Optional[int] = prepare_img()
snake_case_ :Tuple = image_processor(_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
snake_case_ :Optional[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
snake_case_ :Union[str, Any] = model(**_UpperCamelCase )
# masks_queries_logits
snake_case_ :str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case_ :Optional[Any] = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
snake_case_ :Optional[int] = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
# class_queries_logits
snake_case_ :str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case_ :Union[str, Any] = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def _a ( self ):
"""simple docstring"""
snake_case_ :str = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(_UpperCamelCase )
.eval()
)
snake_case_ :str = self.default_image_processor
snake_case_ :List[Any] = prepare_img()
snake_case_ :Optional[Any] = image_processor(_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
snake_case_ :Tuple = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCamelCase , (1, 3, 8_00, 10_88) )
with torch.no_grad():
snake_case_ :Optional[int] = model(**_UpperCamelCase )
# masks_queries_logits
snake_case_ :int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case_ :Optional[Any] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
snake_case_ :Union[str, Any] = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
# class_queries_logits
snake_case_ :List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case_ :Dict = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) )
def _a ( self ):
"""simple docstring"""
snake_case_ :Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(_UpperCamelCase )
.eval()
)
snake_case_ :Union[str, Any] = self.default_image_processor
snake_case_ :Union[str, Any] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
snake_case_ :Tuple = inputs["pixel_values"].to(_UpperCamelCase )
snake_case_ :str = [el.to(_UpperCamelCase ) for el in inputs["mask_labels"]]
snake_case_ :str = [el.to(_UpperCamelCase ) for el in inputs["class_labels"]]
with torch.no_grad():
snake_case_ :Optional[int] = model(**_UpperCamelCase )
self.assertTrue(outputs.loss is not None )
| 584 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __UpperCamelCase ( A__ ):
__A : Any = """biogpt"""
def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = scale_embedding
_UpperCAmelCase = use_cache
_UpperCAmelCase = layerdrop
_UpperCAmelCase = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was passed to the function""" )
A : List[str] = """"""
while len(SCREAMING_SNAKE_CASE_ ) % 3 != 0:
A : Tuple = """0""" + bin_string
A : Dict = [
bin_string[index : index + 3]
for index in range(len(SCREAMING_SNAKE_CASE_ ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
A : int = 0
for index, val in enumerate(SCREAMING_SNAKE_CASE_ ):
oct_val += int(2 ** (2 - index) * int(SCREAMING_SNAKE_CASE_ ) )
oct_string += str(SCREAMING_SNAKE_CASE_ )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 662 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : Any ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class __UpperCamelCase ( A__ ):
def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not hasattr(_UpperCamelCase , '''key_handler''' ):
setattr(_UpperCamelCase , '''key_handler''' , {} )
setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] )
for key in handled_keys:
_UpperCAmelCase = value
return new_cls
@staticmethod
def UpperCamelCase( cls ):
_UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase = ord(_UpperCamelCase )
_UpperCAmelCase = cls.key_handler.get(_UpperCamelCase )
if handler:
_UpperCAmelCase = char
return handler(cls )
else:
return None
def A__ ( cls : Union[str, Any] ) -> Any:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 32 | 0 |
import argparse
import os
import re
import packaging.version
lowerCamelCase__ : Union[str, Any] = """examples/"""
lowerCamelCase__ : Union[str, Any] = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
lowerCamelCase__ : Dict = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
lowerCamelCase__ : List[Any] = """README.md"""
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
snake_case__ = f.read()
snake_case__ , snake_case__ = REPLACE_PATTERNS[pattern]
snake_case__ = replace.replace('''VERSION''' , __lowerCAmelCase )
snake_case__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Any:
for folder, directories, fnames in os.walk(__lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern='''examples''' )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> int:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not patch:
update_version_in_examples(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> Dict:
snake_case__ = '''🤗 Transformers currently provides the following architectures'''
snake_case__ = '''1. Want to contribute a new model?'''
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
snake_case__ = f.readlines()
# Find the start of the list.
snake_case__ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
snake_case__ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
snake_case__ = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> Any:
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
snake_case__ = f.read()
snake_case__ = REPLACE_PATTERNS['''init'''][0].search(__lowerCAmelCase ).groups()[0]
return packaging.version.parse(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase=False ) -> List[Any]:
snake_case__ = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
snake_case__ = default_version.base_version
elif patch:
snake_case__ = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
snake_case__ = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
snake_case__ = input(F"""Which version are you releasing? [{default_version}]""" )
if len(__lowerCAmelCase ) == 0:
snake_case__ = default_version
print(F"""Updating version to {version}.""" )
global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def SCREAMING_SNAKE_CASE ( ) -> Any:
snake_case__ = get_version()
snake_case__ = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
snake_case__ = current_version.base_version
# Check with the user we got that right.
snake_case__ = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(__lowerCAmelCase ) == 0:
snake_case__ = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(__lowerCAmelCase )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCamelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
lowerCamelCase__ : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 33 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : str = IFImgaImgSuperResolutionPipeline
__lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
__lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
__lowercase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:Optional[Any]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
snake_case__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 33 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = 0
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
snake_case__ = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
snake_case__ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
with self.assertRaisesRegex(
_a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 33 |
import math
class __magic_name__ :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[list[float]] , _a:list[int] ):
snake_case__ = 0.0
snake_case__ = 0.0
for i in range(len(_a ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:list[list[int | float]] , _a:list[int] , _a:int , _a:float ):
for i in range(len(_a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def SCREAMING_SNAKE_CASE ( ) -> None:
# Training Examples ( m, n )
snake_case__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
snake_case__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
snake_case__ = SelfOrganizingMap()
snake_case__ = 3
snake_case__ = 0.5
for _ in range(__lowerCAmelCase ):
for j in range(len(__lowerCAmelCase ) ):
# training sample
snake_case__ = training_samples[j]
# Compute the winning vector
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# Update the winning vector
snake_case__ = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# classify test sample
snake_case__ = [0, 0, 0, 1]
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# results
print(F"""Clusters that the test sample belongs to : {winner}""" )
print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 33 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = """▁"""
lowerCamelCase__ : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
lowerCamelCase__ : Union[str, Any] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
lowerCamelCase__ : List[Any] = {
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
lowerCamelCase__ : List[str] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = VOCAB_FILES_NAMES
__lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Optional[Any] = ['input_ids', 'attention_mask']
__lowercase : List[int] = []
__lowercase : List[int] = []
def __init__( self:List[Any] , _a:int , _a:Optional[int]="<s>" , _a:Any="</s>" , _a:int="</s>" , _a:str="<s>" , _a:Tuple="<unk>" , _a:Any="<pad>" , _a:str="<mask>" , _a:str=None , _a:Union[str, Any]=None , _a:List[Any]=None , _a:Optional[Dict[str, Any]] = None , _a:Any=None , _a:str=False , **_a:Tuple , ):
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case__ = legacy_behaviour
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , tokenizer_file=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_a , **_a , )
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
snake_case__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case__ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case__ = 1
snake_case__ = len(self.sp_model )
snake_case__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_a )
}
snake_case__ = {v: k for k, v in self.lang_code_to_id.items()}
snake_case__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
snake_case__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case__ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
snake_case__ = src_lang if src_lang is not None else '''eng_Latn'''
snake_case__ = self.lang_code_to_id[self._src_lang]
snake_case__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self:Optional[Any] ):
snake_case__ = self.__dict__.copy()
snake_case__ = None
snake_case__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self:int , _a:List[Any] ):
snake_case__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ = {}
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def SCREAMING_SNAKE_CASE__ ( self:str ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:str ):
snake_case__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[int] , _a:Optional[List[int]] = None , _a:bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
snake_case__ = [1] * len(self.prefix_tokens )
snake_case__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_a )) + suffix_ones
return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:List[int] , _a:Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[int] , _a:Optional[List[int]] = None ):
snake_case__ = [self.sep_token_id]
snake_case__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:str , _a:Optional[str] , _a:Optional[str] , **_a:List[str] ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
snake_case__ = src_lang
snake_case__ = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
snake_case__ = self.convert_tokens_to_ids(_a )
snake_case__ = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:str ):
return self.sp_model.encode(_a , out_type=_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:List[str] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ = self.sp_model.PieceToId(_a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:str ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:Any ):
snake_case__ = ''''''.join(_a ).replace(_a , ''' ''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:str , _a:Optional[str] = None ):
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
snake_case__ = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE__ ( self:str , _a:List[str] , _a:str = "eng_Latn" , _a:Optional[List[str]] = None , _a:str = "fra_Latn" , **_a:Union[str, Any] , ):
snake_case__ = src_lang
snake_case__ = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE__ ( self:int ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ):
snake_case__ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
snake_case__ = []
snake_case__ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case__ = [self.cur_lang_code]
snake_case__ = [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:str ):
snake_case__ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
snake_case__ = []
snake_case__ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case__ = [self.cur_lang_code]
snake_case__ = [self.eos_token_id]
| 33 |
from __future__ import annotations
from statistics import mean
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
snake_case__ = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i]
snake_case__ = []
snake_case__ = 0
snake_case__ = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
snake_case__ = []
snake_case__ = -1
for i in range(__lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
snake_case__ = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
snake_case__ = i
total_time += burst_time[target_process]
completed += 1
snake_case__ = 0
snake_case__ = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
lowerCamelCase__ : Tuple = 4
lowerCamelCase__ : Union[str, Any] = [2, 5, 3, 7]
lowerCamelCase__ : Optional[Any] = [0, 0, 0, 0]
lowerCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 33 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowerCamelCase__ : Dict = logging.get_logger(__name__)
@dataclass
class __magic_name__ :
'''simple docstring'''
def __init__( self:Dict , _a:Any=False , _a:str=False , _a:Optional[int]=6.0 , _a:Any=None , _a:List[str]=False , _a:int=False , _a:Dict=None , _a:int="fp4" , _a:Optional[Any]=False , **_a:List[Any] , ):
snake_case__ = load_in_abit
snake_case__ = load_in_abit
snake_case__ = llm_inta_threshold
snake_case__ = llm_inta_skip_modules
snake_case__ = llm_inta_enable_fpaa_cpu_offload
snake_case__ = llm_inta_has_fpaa_weight
snake_case__ = bnb_abit_quant_type
snake_case__ = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
snake_case__ = torch.floataa
elif isinstance(_a , _a ):
snake_case__ = getattr(_a , _a )
elif isinstance(_a , torch.dtype ):
snake_case__ = bnb_abit_compute_dtype
else:
raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' )
self.post_init()
def SCREAMING_SNAKE_CASE__ ( self:str ):
if not isinstance(self.llm_inta_threshold , _a ):
raise ValueError('''llm_int8_threshold must be a float''' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _a ):
raise ValueError('''llm_int8_skip_modules must be a list of strings''' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _a ):
raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' )
if not isinstance(self.llm_inta_has_fpaa_weight , _a ):
raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' )
if not isinstance(self.bnb_abit_quant_type , _a ):
raise ValueError('''bnb_4bit_quant_type must be a string''' )
if not isinstance(self.bnb_abit_use_double_quant , _a ):
raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' )
if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse(
'''0.39.0''' ):
raise ValueError(
'''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return self.load_in_abit or self.load_in_abit
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls:List[str] , _a:int , _a:int , **_a:Any ):
snake_case__ = cls(**_a )
snake_case__ = []
for key, value in kwargs.items():
if hasattr(_a , _a ):
setattr(_a , _a , _a )
to_remove.append(_a )
for key in to_remove:
kwargs.pop(_a , _a )
if return_unused_kwargs:
return config, kwargs
else:
return config
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Union[str, os.PathLike] ):
with open(_a , '''w''' , encoding='''utf-8''' ) as writer:
snake_case__ = self.to_dict()
snake_case__ = json.dumps(_a , indent=2 , sort_keys=_a ) + '''\n'''
writer.write(_a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = copy.deepcopy(self.__dict__ )
snake_case__ = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1]
return output
def __repr__( self:List[Any] ):
return F"""{self.__class__.__name__} {self.to_json_string()}"""
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:bool = True ):
if use_diff is True:
snake_case__ = self.to_diff_dict()
else:
snake_case__ = self.to_dict()
return json.dumps(_a , indent=2 , sort_keys=_a ) + "\n"
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.to_dict()
# get the default config dict
snake_case__ = BitsAndBytesConfig().to_dict()
snake_case__ = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
snake_case__ = value
return serializable_config_dict
| 33 |
lowerCamelCase__ : List[str] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
snake_case__ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
snake_case__ = Stack()
snake_case__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__lowerCAmelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(__lowerCAmelCase )
elif i == ")":
# RULE 4
snake_case__ = operator_stack.peek()
operator_stack.pop()
snake_case__ = operand_stack.peek()
operand_stack.pop()
snake_case__ = operand_stack.peek()
operand_stack.pop()
snake_case__ = operators[opr](__lowerCAmelCase , __lowerCAmelCase )
operand_stack.push(__lowerCAmelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 33 | 1 |
from functools import lru_cache
@lru_cache
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCamelCase__ : int = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:List[Any] , *_a:Dict , **_a:Tuple ):
warnings.warn(
'''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PerceiverImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 33 | 1 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : int = 'EncodecFeatureExtractor'
__lowercase : Dict = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self:Optional[int] , _a:Union[str, Any] , _a:Tuple ):
super().__init__(_a , _a )
snake_case__ = self.feature_extractor
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:Optional[int]=None , _a:Tuple=None , _a:List[Any]=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_a , language=_a , no_timestamps=_a )
def __call__( self:List[Any] , *_a:Dict , **_a:Union[str, Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
snake_case__ = kwargs.pop('''audio''' , _a )
snake_case__ = kwargs.pop('''sampling_rate''' , _a )
snake_case__ = kwargs.pop('''text''' , _a )
if len(_a ) > 0:
snake_case__ = args[0]
snake_case__ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
snake_case__ = self.tokenizer(_a , **_a )
if audio is not None:
snake_case__ = self.feature_extractor(_a , *_a , sampling_rate=_a , **_a )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
snake_case__ = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
snake_case__ = audio_inputs['''padding_mask''']
return inputs
def SCREAMING_SNAKE_CASE__ ( self:int , *_a:List[str] , **_a:List[Any] ):
snake_case__ = kwargs.pop('''audio''' , _a )
snake_case__ = kwargs.pop('''padding_mask''' , _a )
if len(_a ) > 0:
snake_case__ = args[0]
snake_case__ = args[1:]
if audio_values is not None:
return self._decode_audio(_a , padding_mask=_a )
else:
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Optional[int] , **_a:Dict ):
return self.tokenizer.decode(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:Optional[Any] , _a:Optional = None ):
snake_case__ = to_numpy(_a )
snake_case__ , snake_case__ , snake_case__ = audio_values.shape
if padding_mask is None:
return list(_a )
snake_case__ = to_numpy(_a )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
snake_case__ = seq_len - padding_mask.shape[-1]
snake_case__ = 1 - self.feature_extractor.padding_value
snake_case__ = np.pad(_a , ((0, 0), (0, difference)) , '''constant''' , constant_values=_a )
snake_case__ = audio_values.tolist()
for i in range(_a ):
snake_case__ = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
snake_case__ = sliced_audio.reshape(_a , -1 )
return audio_values
| 33 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Tuple = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
lowerCamelCase__ : Tuple = TypeVar("""T""")
class __magic_name__ (Generic[T] ):
'''simple docstring'''
def __init__( self:List[str] , _a:T ):
snake_case__ = data
snake_case__ = self
snake_case__ = 0
class __magic_name__ (Generic[T] ):
'''simple docstring'''
def __init__( self:Any ):
# map from node name to the node object
snake_case__ = {}
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:T ):
# create a new set with x as its member
snake_case__ = DisjointSetTreeNode(_a )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:T ):
# find the set x belongs to (with path-compression)
snake_case__ = self.map[data]
if elem_ref != elem_ref.parent:
snake_case__ = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:DisjointSetTreeNode[T] , _a:DisjointSetTreeNode[T] ):
# helper function for union operation
if nodea.rank > nodea.rank:
snake_case__ = nodea
else:
snake_case__ = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:T , _a:T ):
# merge 2 disjoint sets
self.link(self.find_set(_a ) , self.find_set(_a ) )
class __magic_name__ (Generic[T] ):
'''simple docstring'''
def __init__( self:List[str] ):
# connections: map from the node to the neighbouring nodes (with weights)
snake_case__ = {}
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:T ):
# add a node ONLY if its not present in the graph
if node not in self.connections:
snake_case__ = {}
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:T , _a:T , _a:int ):
# add an edge with the given weight
self.add_node(_a )
self.add_node(_a )
snake_case__ = weight
snake_case__ = weight
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = []
snake_case__ = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda _a : x[2] )
# creating the disjoint set
snake_case__ = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(_a )
# MST generation
snake_case__ = 0
snake_case__ = 0
snake_case__ = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
snake_case__ , snake_case__ , snake_case__ = edges[index]
index += 1
snake_case__ = disjoint_set.find_set(_a )
snake_case__ = disjoint_set.find_set(_a )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(_a , _a , _a )
disjoint_set.union(_a , _a )
return graph
| 33 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
snake_case__ = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Dict = StableDiffusionLatentUpscalePipeline
__lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
__lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
__lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowercase : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowercase : List[Any] = frozenset([] )
__lowercase : Any = True
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = 1
snake_case__ = 4
snake_case__ = (16, 16)
snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
torch.manual_seed(0 )
snake_case__ = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
snake_case__ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' )
snake_case__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
snake_case__ = CLIPTextModel(_a )
snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case__ = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:List[str]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''cpu'''
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
snake_case__ = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = 2
snake_case__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
snake_case__ = getattr(_a , scheduler_enum.name )
snake_case__ = scheduler_cls.from_config(pipe.scheduler.config )
snake_case__ = pipe(**_a )[0]
outputs.append(_a )
assert check_same_shape(_a )
@require_torch_gpu
@slow
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
snake_case__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 33 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[Any] = KandinskyVaaPipeline
__lowercase : List[str] = [
'image_embeds',
'negative_image_embeds',
]
__lowercase : Any = ['image_embeds', 'negative_image_embeds']
__lowercase : Any = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__lowercase : Optional[int] = False
@property
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return 1_00
@property
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
torch.manual_seed(0 )
snake_case__ = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
snake_case__ = UNetaDConditionModel(**_a )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self:str ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
torch.manual_seed(0 )
snake_case__ = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.dummy_unet
snake_case__ = self.dummy_movq
snake_case__ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
snake_case__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:int , _a:Union[str, Any]=0 ):
snake_case__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
snake_case__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = '''cpu'''
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
snake_case__ = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = pipe(**self.get_dummy_inputs(_a ) )
snake_case__ = output.images
snake_case__ = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
snake_case__ = image[0, -3:, -3:, -1]
snake_case__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ = np.array(
[0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
snake_case__ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
snake_case__ = KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
snake_case__ = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
snake_case__ = '''red cat, 4k photo'''
snake_case__ = torch.Generator(device='''cuda''' ).manual_seed(0 )
snake_case__ , snake_case__ = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
snake_case__ = torch.Generator(device='''cuda''' ).manual_seed(0 )
snake_case__ = pipeline(
image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_00 , output_type='''np''' , )
snake_case__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(_a , _a )
| 33 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''ZinengTang/tvlt-base'''
snake_case__ = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:List[Any] ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , **_a:Tuple ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
snake_case__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _a )
self.assertIsInstance(processor.image_processor , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([1_20_00] )
snake_case__ = feature_extractor(_a , return_tensors='''np''' )
snake_case__ = processor(audio=_a , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = image_processor(_a , return_tensors='''np''' )
snake_case__ = processor(images=_a , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([1_20_00] )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = processor(audio=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 33 | 1 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowerCamelCase__ : Dict = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ : Union[str, Any] = """PoolFormerConfig"""
# Base docstring
lowerCamelCase__ : List[str] = """sail/poolformer_s12"""
lowerCamelCase__ : Optional[Any] = [1, 5_1_2, 7, 7]
# Image classification docstring
lowerCamelCase__ : Tuple = """sail/poolformer_s12"""
lowerCamelCase__ : Optional[int] = """tabby, tabby cat"""
lowerCamelCase__ : Union[str, Any] = [
"""sail/poolformer_s12""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = False ) -> Any:
if drop_prob == 0.0 or not training:
return input
snake_case__ = 1 - drop_prob
snake_case__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
snake_case__ = keep_prob + torch.rand(__lowerCAmelCase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
snake_case__ = input.div(__lowerCAmelCase ) * random_tensor
return output
class __magic_name__ (nn.Module ):
'''simple docstring'''
def __init__( self:Tuple , _a:Optional[float] = None ):
super().__init__()
snake_case__ = drop_prob
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:torch.Tensor ):
return drop_path(_a , self.drop_prob , self.training )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
return "p={}".format(self.drop_prob )
class __magic_name__ (nn.Module ):
'''simple docstring'''
def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[int] , _a:Tuple , _a:List[str] , _a:Dict , _a:List[Any]=None ):
super().__init__()
snake_case__ = patch_size if isinstance(_a , collections.abc.Iterable ) else (patch_size, patch_size)
snake_case__ = stride if isinstance(_a , collections.abc.Iterable ) else (stride, stride)
snake_case__ = padding if isinstance(_a , collections.abc.Iterable ) else (padding, padding)
snake_case__ = nn.Convad(_a , _a , kernel_size=_a , stride=_a , padding=_a )
snake_case__ = norm_layer(_a ) if norm_layer else nn.Identity()
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:Optional[Any] ):
snake_case__ = self.projection(_a )
snake_case__ = self.norm(_a )
return embeddings
class __magic_name__ (nn.GroupNorm ):
'''simple docstring'''
def __init__( self:Dict , _a:Tuple , **_a:int ):
super().__init__(1 , _a , **_a )
class __magic_name__ (nn.Module ):
'''simple docstring'''
def __init__( self:Union[str, Any] , _a:List[Any] ):
super().__init__()
snake_case__ = nn.AvgPoolad(_a , stride=1 , padding=pool_size // 2 , count_include_pad=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:int ):
return self.pool(_a ) - hidden_states
class __magic_name__ (nn.Module ):
'''simple docstring'''
def __init__( self:str , _a:Optional[Any] , _a:Optional[Any] , _a:str , _a:List[Any] ):
super().__init__()
snake_case__ = nn.Convad(_a , _a , 1 )
snake_case__ = nn.Convad(_a , _a , 1 )
snake_case__ = PoolFormerDropPath(_a )
if isinstance(config.hidden_act , _a ):
snake_case__ = ACTaFN[config.hidden_act]
else:
snake_case__ = config.hidden_act
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Any ):
snake_case__ = self.conva(_a )
snake_case__ = self.act_fn(_a )
snake_case__ = self.drop(_a )
snake_case__ = self.conva(_a )
snake_case__ = self.drop(_a )
return hidden_states
class __magic_name__ (nn.Module ):
'''simple docstring'''
def __init__( self:Any , _a:Union[str, Any] , _a:List[Any] , _a:int , _a:str , _a:Dict , _a:List[Any] ):
super().__init__()
snake_case__ = PoolFormerPooling(_a )
snake_case__ = PoolFormerOutput(_a , _a , _a , _a )
snake_case__ = PoolFormerGroupNorm(_a )
snake_case__ = PoolFormerGroupNorm(_a )
# Useful for training neural nets
snake_case__ = PoolFormerDropPath(_a ) if drop_path > 0.0 else nn.Identity()
snake_case__ = config.use_layer_scale
if config.use_layer_scale:
snake_case__ = nn.Parameter(
config.layer_scale_init_value * torch.ones((_a) ) , requires_grad=_a )
snake_case__ = nn.Parameter(
config.layer_scale_init_value * torch.ones((_a) ) , requires_grad=_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] ):
if self.use_layer_scale:
snake_case__ = self.pooling(self.before_norm(_a ) )
snake_case__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
snake_case__ = hidden_states + self.drop_path(_a )
snake_case__ = ()
snake_case__ = self.output(self.after_norm(_a ) )
snake_case__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
snake_case__ = hidden_states + self.drop_path(_a )
snake_case__ = (output,) + outputs
return outputs
else:
snake_case__ = self.drop_path(self.pooling(self.before_norm(_a ) ) )
# First residual connection
snake_case__ = pooling_output + hidden_states
snake_case__ = ()
# Second residual connection inside the PoolFormerOutput block
snake_case__ = self.drop_path(self.output(self.after_norm(_a ) ) )
snake_case__ = hidden_states + layer_output
snake_case__ = (output,) + outputs
return outputs
class __magic_name__ (nn.Module ):
'''simple docstring'''
def __init__( self:Dict , _a:Dict ):
super().__init__()
snake_case__ = config
# stochastic depth decay rule
snake_case__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
snake_case__ = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
snake_case__ = nn.ModuleList(_a )
# Transformer blocks
snake_case__ = []
snake_case__ = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
snake_case__ = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_a , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_a ) )
snake_case__ = nn.ModuleList(_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Any , _a:List[str]=False , _a:Optional[Any]=True ):
snake_case__ = () if output_hidden_states else None
snake_case__ = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
snake_case__ , snake_case__ = layers
# Get patch embeddings from hidden_states
snake_case__ = embedding_layer(_a )
# Send the embeddings through the blocks
for _, blk in enumerate(_a ):
snake_case__ = blk(_a )
snake_case__ = layer_outputs[0]
if output_hidden_states:
snake_case__ = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_a , hidden_states=_a )
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Tuple = PoolFormerConfig
__lowercase : str = 'poolformer'
__lowercase : Optional[int] = 'pixel_values'
__lowercase : int = True
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:Tuple ):
if isinstance(_a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_a , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:int=False ):
if isinstance(_a , _a ):
snake_case__ = value
lowerCamelCase__ : Optional[int] = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCamelCase__ : List[str] = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' ,snake_case_ ,)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:Tuple , _a:List[str] ):
super().__init__(_a )
snake_case__ = config
snake_case__ = PoolFormerEncoder(_a )
# Initialize weights and apply final processing
self.post_init()
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_a , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[torch.FloatTensor] = None , _a:Optional[bool] = None , _a:Optional[bool] = None , ):
snake_case__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
snake_case__ = self.encoder(
_a , output_hidden_states=_a , return_dict=_a , )
snake_case__ = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_a , hidden_states=encoder_outputs.hidden_states , )
class __magic_name__ (nn.Module ):
'''simple docstring'''
def __init__( self:Optional[Any] , _a:List[str] ):
super().__init__()
snake_case__ = nn.Linear(config.hidden_size , config.hidden_size )
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Tuple ):
snake_case__ = self.dense(_a )
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' ,snake_case_ ,)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:Tuple , _a:Dict ):
super().__init__(_a )
snake_case__ = config.num_labels
snake_case__ = PoolFormerModel(_a )
# Final norm
snake_case__ = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
snake_case__ = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Optional[torch.FloatTensor] = None , _a:Optional[torch.LongTensor] = None , _a:Optional[bool] = None , _a:Optional[bool] = None , ):
snake_case__ = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ = self.poolformer(
_a , output_hidden_states=_a , return_dict=_a , )
snake_case__ = outputs[0]
snake_case__ = self.classifier(self.norm(_a ).mean([-2, -1] ) )
snake_case__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case__ = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case__ = '''single_label_classification'''
else:
snake_case__ = '''multi_label_classification'''
if self.config.problem_type == "regression":
snake_case__ = MSELoss()
if self.num_labels == 1:
snake_case__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case__ = loss_fct(_a , _a )
elif self.config.problem_type == "single_label_classification":
snake_case__ = CrossEntropyLoss()
snake_case__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case__ = BCEWithLogitsLoss()
snake_case__ = loss_fct(_a , _a )
if not return_dict:
snake_case__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_a , logits=_a , hidden_states=outputs.hidden_states )
| 33 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Optional[int] = 'data2vec-vision'
def __init__( self:int , _a:Tuple=7_68 , _a:int=12 , _a:Any=12 , _a:Optional[int]=30_72 , _a:Optional[int]="gelu" , _a:Any=0.0 , _a:Any=0.0 , _a:List[str]=0.02 , _a:Dict=1e-12 , _a:Tuple=2_24 , _a:Any=16 , _a:str=3 , _a:str=False , _a:Union[str, Any]=False , _a:Optional[int]=False , _a:Any=False , _a:Dict=0.1 , _a:Dict=0.1 , _a:str=True , _a:str=[3, 5, 7, 11] , _a:List[str]=[1, 2, 3, 6] , _a:List[str]=True , _a:Any=0.4 , _a:str=2_56 , _a:Union[str, Any]=1 , _a:int=False , _a:Optional[int]=2_55 , **_a:Dict , ):
super().__init__(**_a )
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = use_mask_token
snake_case__ = use_absolute_position_embeddings
snake_case__ = use_relative_position_bias
snake_case__ = use_shared_relative_position_bias
snake_case__ = layer_scale_init_value
snake_case__ = drop_path_rate
snake_case__ = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case__ = out_indices
snake_case__ = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case__ = use_auxiliary_head
snake_case__ = auxiliary_loss_weight
snake_case__ = auxiliary_channels
snake_case__ = auxiliary_num_convs
snake_case__ = auxiliary_concat_input
snake_case__ = semantic_loss_ignore_index
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Any = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return 1e-4
| 33 | 1 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
snake_case__ = args.pruning_method
snake_case__ = args.threshold
snake_case__ = args.model_name_or_path.rstrip('''/''' )
snake_case__ = args.target_model_path
print(F"""Load fine-pruned model from {model_name_or_path}""" )
snake_case__ = torch.load(os.path.join(__lowerCAmelCase , '''pytorch_model.bin''' ) )
snake_case__ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
snake_case__ = tensor
print(F"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
snake_case__ = tensor
print(F"""Copied layer {name}""" )
elif "bias" in name:
snake_case__ = tensor
print(F"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
snake_case__ = MagnitudeBinarizer.apply(inputs=__lowerCAmelCase , threshold=__lowerCAmelCase )
snake_case__ = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
snake_case__ = name[:-6]
snake_case__ = model[F"""{prefix_}mask_scores"""]
snake_case__ = TopKBinarizer.apply(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
snake_case__ = name[:-6]
snake_case__ = model[F"""{prefix_}mask_scores"""]
snake_case__ = ThresholdBinarizer.apply(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
snake_case__ = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
snake_case__ = name[:-6]
snake_case__ = model[F"""{prefix_}mask_scores"""]
snake_case__ , snake_case__ = -0.1, 1.1
snake_case__ = torch.sigmoid(__lowerCAmelCase )
snake_case__ = s * (r - l) + l
snake_case__ = s_bar.clamp(min=0.0 , max=1.0 )
snake_case__ = tensor * mask
print(F"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
snake_case__ = os.path.join(
os.path.dirname(__lowerCAmelCase ) , F"""bertarized_{os.path.basename(__lowerCAmelCase )}""" )
if not os.path.isdir(__lowerCAmelCase ):
shutil.copytree(__lowerCAmelCase , __lowerCAmelCase )
print(F"""\nCreated folder {target_model_path}""" )
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
lowerCamelCase__ : Optional[int] = parser.parse_args()
main(args)
| 33 |
import os
import sys
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : Optional[int] = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
return AutoConfig.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
return AutoTokenizer.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Tuple:
return AutoModel.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
return AutoModelForCausalLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[Any]:
return AutoModelForMaskedLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
return AutoModelForSequenceClassification.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
return AutoModelForQuestionAnswering.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
| 33 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:List[Any] , _a:Dict , _a:List[Any] ):
self.assertEqual(len(_a ) , len(_a ) )
for a, b in zip(_a , _a ):
self.assertAlmostEqual(_a , _a , delta=_a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = None
ops.enable_eager_execution_internal()
snake_case__ = tf.config.list_physical_devices('''CPU''' )
if len(_a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
snake_case__ = tf.config.list_logical_devices(device_type='''CPU''' )
snake_case__ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
snake_case__ = GradientAccumulator()
snake_case__ = tf.Variable([4.0, 3.0] )
snake_case__ , snake_case__ = create_optimizer(5e-5 , 10 , 5 )
snake_case__ = tf.Variable([0.0, 0.0] , trainable=_a )
def accumulate_on_replica(_a:Any ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_a:Optional[int] , _a:List[str] ):
with strategy.scope():
snake_case__ = strategy.experimental_local_results(_a )
local_variables[0].assign(_a )
local_variables[1].assign(_a )
strategy.run(_a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_a )
def _check_local_values(_a:Any , _a:Any ):
snake_case__ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _a , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , _a , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 33 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : str = (CMStochasticIterativeScheduler,)
__lowercase : List[str] = 10
def SCREAMING_SNAKE_CASE__ ( self:int , **_a:Optional[int] ):
snake_case__ = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**_a )
return config
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = 10
snake_case__ = self.get_scheduler_config()
snake_case__ = self.scheduler_classes[0](**_a )
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps[0]
snake_case__ = scheduler.timesteps[1]
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_a )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = 1
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_a ):
# 1. scale model input
snake_case__ = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
snake_case__ = model(_a , _a )
# 3. predict previous sample x_t-1
snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
snake_case__ = pred_prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [1_06, 0]
scheduler.set_timesteps(timesteps=_a )
snake_case__ = scheduler.timesteps
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
snake_case__ = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
snake_case__ = model(_a , _a )
# 3. predict previous sample x_t-1
snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
snake_case__ = pred_prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [39, 30, 12, 15, 0]
with self.assertRaises(_a , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [39, 30, 12, 1, 0]
snake_case__ = len(_a )
with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=_a )
| 33 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase__ : Optional[Any] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
snake_case__ = '''lm_head'''
snake_case__ = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case__ = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case__ = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case__ = value
elif weight_type == "weight_g":
snake_case__ = value
elif weight_type == "weight_v":
snake_case__ = value
elif weight_type == "bias":
snake_case__ = value
else:
snake_case__ = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
snake_case__ = []
snake_case__ = fairseq_model.state_dict()
snake_case__ = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
snake_case__ = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
snake_case__ = True
if "*" in mapped_key:
snake_case__ = name.split(__lowerCAmelCase )[0].split('''.''' )[-2]
snake_case__ = mapped_key.replace('''*''' , __lowerCAmelCase )
if "weight_g" in name:
snake_case__ = '''weight_g'''
elif "weight_v" in name:
snake_case__ = '''weight_v'''
elif "bias" in name:
snake_case__ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ = '''weight'''
else:
snake_case__ = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
snake_case__ = full_name.split('''conv_layers.''' )[-1]
snake_case__ = name.split('''.''' )
snake_case__ = int(items[0] )
snake_case__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True ) -> Optional[Any]:
if config_path is not None:
snake_case__ = UniSpeechConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case__ = UniSpeechConfig()
if is_finetuned:
if dict_path:
snake_case__ = Dictionary.load_from_json(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case__ = target_dict.pad_index
snake_case__ = target_dict.bos_index
snake_case__ = target_dict.eos_index
snake_case__ = len(target_dict.symbols )
snake_case__ = os.path.join(__lowerCAmelCase , '''vocab.json''' )
if not os.path.isdir(__lowerCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
snake_case__ = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case__ = 42
snake_case__ = 43
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ = WavaVecaPhonemeCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__lowerCAmelCase , )
snake_case__ = True if config.feat_extract_norm == '''layer''' else False
snake_case__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
snake_case__ = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case__ = UniSpeechForCTC(__lowerCAmelCase )
else:
snake_case__ = UniSpeechForPreTraining(__lowerCAmelCase )
if is_finetuned:
snake_case__ , snake_case__ , snake_case__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
snake_case__ , snake_case__ , snake_case__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case__ = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_unispeech.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCamelCase__ : List[str] = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 33 |
import numpy as np
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray:
return vector * sigmoid(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Union[str, Any] = IFInpaintingSuperResolutionPipeline
__lowercase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__lowercase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'} )
__lowercase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:Dict , _a:Optional[int]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a )
snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE__ ( self:str ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 33 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100 ) -> int:
snake_case__ = set()
snake_case__ = 0
snake_case__ = n + 1 # maximum limit
for a in range(2 , __lowerCAmelCase ):
for b in range(2 , __lowerCAmelCase ):
snake_case__ = a**b # calculates the current power
collect_powers.add(__lowerCAmelCase ) # adds the result to the set
return len(__lowerCAmelCase )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 33 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' ,'False' ) ) is not True ,reason='Skipping test because should only be run when releasing minor transformers version' ,)
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 16_00, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 16_00, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:str ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_a , )
assert hasattr(self , '''env''' )
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Tuple ):
# configuration for running training on smdistributed Model Parallel
snake_case__ = {
'''enabled''': True,
'''processes_per_host''': 8,
}
snake_case__ = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
snake_case__ = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
snake_case__ = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=_a , instance_type=self.instance_type , debugger_hook_config=_a , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=_a , py_version='''py36''' , )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:int ):
TrainingJobAnalytics(_a ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Optional[Any] ):
# create estimator
snake_case__ = self.create_estimator(_a )
# run training
estimator.fit()
# result dataframe
snake_case__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
snake_case__ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
snake_case__ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
snake_case__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _a )
| 33 |
from copy import deepcopy
class __magic_name__ :
'''simple docstring'''
def __init__( self:int , _a:list[int] | None = None , _a:int | None = None ):
if arr is None and size is not None:
snake_case__ = size
snake_case__ = [0] * size
elif arr is not None:
self.init(_a )
else:
raise ValueError('''Either arr or size must be specified''' )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:list[int] ):
snake_case__ = len(_a )
snake_case__ = deepcopy(_a )
for i in range(1 , self.size ):
snake_case__ = self.next_(_a )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case__ = self.next_(_a )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:int ):
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:int ):
return index - (index & (-index))
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:int ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case__ = self.next_(_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
self.add(_a , value - self.get(_a ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ):
if right == 0:
return 0
snake_case__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case__ = self.prev(_a )
return result
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
return self.prefix(_a ) - self.prefix(_a )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
return self.query(_a , index + 1 )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
value -= self.tree[0]
if value < 0:
return -1
snake_case__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case__ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 1 |
lowerCamelCase__ : Optional[int] = """Input must be a string of 8 numbers plus letter"""
lowerCamelCase__ : List[str] = """TRWAGMYFPDXBNJZSQVHLCKE"""
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> bool:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
snake_case__ = F"""Expected string as input, found {type(__lowerCAmelCase ).__name__}"""
raise TypeError(__lowerCAmelCase )
snake_case__ = spanish_id.replace('''-''' , '''''' ).upper()
if len(__lowerCAmelCase ) != 9:
raise ValueError(__lowerCAmelCase )
try:
snake_case__ = int(spanish_id_clean[0:8] )
snake_case__ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__lowerCAmelCase ) from ex
if letter.isdigit():
raise ValueError(__lowerCAmelCase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __magic_name__ :
'''simple docstring'''
__lowercase : int = BlenderbotConfig
__lowercase : Any = {}
__lowercase : Optional[Any] = 'gelu'
def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = eos_token_id
snake_case__ = pad_token_id
snake_case__ = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ):
snake_case__ = TFBlenderbotModel(config=_a ).get_decoder()
snake_case__ = inputs_dict['''input_ids''']
snake_case__ = input_ids[:1, :]
snake_case__ = inputs_dict['''attention_mask'''][:1, :]
snake_case__ = inputs_dict['''head_mask''']
snake_case__ = 1
# first forward pass
snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
snake_case__ , snake_case__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case__ = model(_a , attention_mask=_a )[0]
snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
snake_case__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple:
if attention_mask is None:
snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__lowercase : Tuple = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowercase : Any = True
__lowercase : int = False
__lowercase : int = False
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFBlenderbotModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_tokenizers
@require_tf
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.']
__lowercase : Optional[int] = 'facebook/blenderbot-400M-distill'
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' )
snake_case__ = self.model.generate(
model_inputs.input_ids , )
snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 33 | 1 |
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = input("""Enter image url: """).strip()
print(F"""Downloading image from {url} ...""")
lowerCamelCase__ : Optional[int] = BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ : str = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
lowerCamelCase__ : Dict = requests.get(image_url).content
lowerCamelCase__ : Optional[int] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 33 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = 0
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
snake_case__ = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
snake_case__ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
with self.assertRaisesRegex(
_a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 33 | 1 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100 ) -> int:
snake_case__ = set()
snake_case__ = 0
snake_case__ = n + 1 # maximum limit
for a in range(2 , __lowerCAmelCase ):
for b in range(2 , __lowerCAmelCase ):
snake_case__ = a**b # calculates the current power
collect_powers.add(__lowerCAmelCase ) # adds the result to the set
return len(__lowerCAmelCase )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 33 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> int:
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ = ''''''
else:
snake_case__ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[
: config.hidden_size, :
]
snake_case__ = in_proj_bias[: config.hidden_size]
snake_case__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
snake_case__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
snake_case__ = dct.pop(__lowerCAmelCase )
snake_case__ = val
def SCREAMING_SNAKE_CASE ( ) -> str:
snake_case__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = ViTConfig()
snake_case__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
snake_case__ = True
snake_case__ = int(vit_name[-12:-10] )
snake_case__ = int(vit_name[-9:-6] )
else:
snake_case__ = 1000
snake_case__ = '''huggingface/label-files'''
snake_case__ = '''imagenet-1k-id2label.json'''
snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = int(vit_name[-6:-4] )
snake_case__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
snake_case__ = 192
snake_case__ = 768
snake_case__ = 12
snake_case__ = 3
elif vit_name[9:].startswith('''small''' ):
snake_case__ = 384
snake_case__ = 1536
snake_case__ = 12
snake_case__ = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
snake_case__ = 768
snake_case__ = 2304
snake_case__ = 8
snake_case__ = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
snake_case__ = 1024
snake_case__ = 4096
snake_case__ = 24
snake_case__ = 16
elif vit_name[4:].startswith('''huge''' ):
snake_case__ = 1280
snake_case__ = 5120
snake_case__ = 32
snake_case__ = 16
# load original model from timm
snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ = timm_model.state_dict()
if base_model:
remove_classification_head_(__lowerCAmelCase )
snake_case__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ = ViTModel(__lowerCAmelCase ).eval()
else:
snake_case__ = ViTForImageClassification(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case__ = DeiTImageProcessor(size=config.image_size )
else:
snake_case__ = ViTImageProcessor(size=config.image_size )
snake_case__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
snake_case__ = encoding['''pixel_values''']
snake_case__ = model(__lowerCAmelCase )
if base_model:
snake_case__ = timm_model.forward_features(__lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
else:
snake_case__ = timm_model(__lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase__ : str = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 33 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case_ )
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : str = field(default='text-classification' ,metadata={'include_in_asdict_even_if_is_default': True} )
__lowercase : ClassVar[Features] = Features({'text': Value('string' )} )
__lowercase : ClassVar[Features] = Features({'labels': ClassLabel} )
__lowercase : str = "text"
__lowercase : str = "labels"
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:str ):
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , _a ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
snake_case__ = copy.deepcopy(self )
snake_case__ = self.label_schema.copy()
snake_case__ = features[self.label_column]
snake_case__ = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE__ ( self:int ):
return {
self.text_column: "text",
self.label_column: "labels",
}
| 33 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = ['image_processor', 'tokenizer']
__lowercase : str = 'AutoImageProcessor'
__lowercase : Dict = 'AutoTokenizer'
def __init__( self:int , _a:List[str]=None , _a:Optional[Any]=None , **_a:List[str] ):
snake_case__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
snake_case__ = kwargs.pop('''feature_extractor''' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
snake_case__ = self.image_processor
snake_case__ = False
def __call__( self:Optional[int] , *_a:str , **_a:int ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
snake_case__ = kwargs.pop('''images''' , _a )
snake_case__ = kwargs.pop('''text''' , _a )
if len(_a ) > 0:
snake_case__ = args[0]
snake_case__ = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
snake_case__ = self.image_processor(_a , *_a , **_a )
if text is not None:
snake_case__ = self.tokenizer(_a , **_a )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Union[str, Any] , **_a:Any ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , *_a:Union[str, Any] , **_a:Optional[int] ):
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
snake_case__ = True
snake_case__ = self.tokenizer
yield
snake_case__ = self.image_processor
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Dict , _a:Dict=False , _a:Optional[int]=None ):
if added_vocab is None:
snake_case__ = self.tokenizer.get_added_vocab()
snake_case__ = {}
while tokens:
snake_case__ = re.search(r'''<s_(.*?)>''' , _a , re.IGNORECASE )
if start_token is None:
break
snake_case__ = start_token.group(1 )
snake_case__ = re.search(rF"""</s_{key}>""" , _a , re.IGNORECASE )
snake_case__ = start_token.group()
if end_token is None:
snake_case__ = tokens.replace(_a , '''''' )
else:
snake_case__ = end_token.group()
snake_case__ = re.escape(_a )
snake_case__ = re.escape(_a )
snake_case__ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _a , re.IGNORECASE )
if content is not None:
snake_case__ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
snake_case__ = self.tokenajson(_a , is_inner_value=_a , added_vocab=_a )
if value:
if len(_a ) == 1:
snake_case__ = value[0]
snake_case__ = value
else: # leaf nodes
snake_case__ = []
for leaf in content.split(r'''<sep/>''' ):
snake_case__ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
snake_case__ = leaf[1:-2] # for categorical special tokens
output[key].append(_a )
if len(output[key] ) == 1:
snake_case__ = output[key][0]
snake_case__ = tokens[tokens.find(_a ) + len(_a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_a , added_vocab=_a )
if len(_a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 33 | 1 |
import requests
from bsa import BeautifulSoup
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
snake_case__ = BeautifulSoup(requests.get(__lowerCAmelCase , params=__lowerCAmelCase ).content , '''html.parser''' )
snake_case__ = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
snake_case__ = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 3_0,
"""pages""": """3979-3990""",
"""year""": 2_0_1_8,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 33 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
'''simple docstring'''
def __init__( self:Optional[Any] , _a:int , _a:str=3 , _a:Optional[int]=32 , _a:Optional[Any]=3 , _a:Tuple=10 , _a:List[Any]=[8, 16, 32, 64] , _a:str=[1, 1, 2, 1] , _a:Any=True , _a:List[Any]=True , _a:List[str]="relu" , _a:int=3 , _a:Tuple=None , _a:Tuple=["stage2", "stage3", "stage4"] , _a:List[Any]=[2, 3, 4] , _a:Union[str, Any]=1 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = num_channels
snake_case__ = embeddings_size
snake_case__ = hidden_sizes
snake_case__ = depths
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = hidden_act
snake_case__ = num_labels
snake_case__ = scope
snake_case__ = len(_a )
snake_case__ = out_features
snake_case__ = out_indices
snake_case__ = num_groups
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Optional[int] , _a:Tuple , _a:int ):
snake_case__ = BitModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Tuple , _a:Any , _a:Union[str, Any] ):
snake_case__ = self.num_labels
snake_case__ = BitForImageClassification(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:str , _a:List[str] , _a:Any ):
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case__ = None
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__lowercase : int = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
__lowercase : Tuple = False
__lowercase : Optional[Any] = False
__lowercase : str = False
__lowercase : Tuple = False
__lowercase : Tuple = False
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = BitModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
def check_hidden_states_output(_a:List[Any] , _a:int , _a:Union[str, Any] ):
snake_case__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
snake_case__ = model(**self._prepare_for_class(_a , _a ) )
snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case__ = layer_type
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( ) -> Any:
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
# verify the logits
snake_case__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _a )
snake_case__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
__lowercase : int = BitConfig
__lowercase : Any = False
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = BitModelTester(self )
| 33 | 1 |
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowerCamelCase__ : Tuple = yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
lowerCamelCase__ : Dict = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
lowerCamelCase__ : Tuple = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCamelCase__ : Optional[int] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCamelCase__ : int = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
lowerCamelCase__ : Dict = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCamelCase__ : str = (
"""The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
lowerCamelCase__ : Dict = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCamelCase__ : Any = (
"""The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
lowerCamelCase__ : List[str] = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCamelCase__ : int = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
lowerCamelCase__ : List[Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCamelCase__ : Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
lowerCamelCase__ : str = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
lowerCamelCase__ : Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
lowerCamelCase__ : List[str] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
lowerCamelCase__ : List[str] = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
lowerCamelCase__ : Optional[Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
lowerCamelCase__ : Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
lowerCamelCase__ : Tuple = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCamelCase__ : str = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
lowerCamelCase__ : Optional[Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
lowerCamelCase__ : List[Any] = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
lowerCamelCase__ : Union[str, Any] = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCamelCase__ : Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
lowerCamelCase__ : Union[str, Any] = """"""
lowerCamelCase__ : Any = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
lowerCamelCase__ : int = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
lowerCamelCase__ : Optional[Any] = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
assert ReadMe.from_string(__lowerCAmelCase , __lowerCAmelCase ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
with pytest.raises(__lowerCAmelCase , match=re.escape(expected_error.format(path='''root''' ) ) ):
snake_case__ = ReadMe.from_string(__lowerCAmelCase , __lowerCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
with pytest.raises(__lowerCAmelCase , match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> str:
ReadMe.from_string(__lowerCAmelCase , __lowerCAmelCase , suppress_parsing_errors=__lowerCAmelCase )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ = Path(__lowerCAmelCase ) / '''README.md'''
with open(__lowerCAmelCase , '''w+''' ) as readme_file:
readme_file.write(__lowerCAmelCase )
snake_case__ = ReadMe.from_readme(__lowerCAmelCase , __lowerCAmelCase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ = Path(__lowerCAmelCase ) / '''README.md'''
with open(__lowerCAmelCase , '''w+''' ) as readme_file:
readme_file.write(__lowerCAmelCase )
snake_case__ = expected_error.format(path=__lowerCAmelCase )
with pytest.raises(__lowerCAmelCase , match=re.escape(__lowerCAmelCase ) ):
snake_case__ = ReadMe.from_readme(__lowerCAmelCase , __lowerCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ = Path(__lowerCAmelCase ) / '''README.md'''
with open(__lowerCAmelCase , '''w+''' ) as readme_file:
readme_file.write(__lowerCAmelCase )
snake_case__ = expected_error.format(path=__lowerCAmelCase )
with pytest.raises(__lowerCAmelCase , match=re.escape(__lowerCAmelCase ) ):
ReadMe.from_readme(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ = Path(__lowerCAmelCase ) / '''README.md'''
with open(__lowerCAmelCase , '''w+''' ) as readme_file:
readme_file.write(__lowerCAmelCase )
ReadMe.from_readme(__lowerCAmelCase , __lowerCAmelCase , suppress_parsing_errors=__lowerCAmelCase )
| 33 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase__ : Any = """\
"""
lowerCamelCase__ : List[str] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowerCamelCase__ : Any = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:List[Any] , _a:int = 16 , _a:bool = True , _a:Any=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
snake_case__ = '''cuda'''
else:
snake_case__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
snake_case__ = AutoModelForCausalLM.from_pretrained(_a )
snake_case__ = model.to(_a )
snake_case__ = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
snake_case__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
snake_case__ = model.config.max_length - 1
else:
snake_case__ = model.config.max_length
snake_case__ = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
snake_case__ = encodings['''input_ids''']
snake_case__ = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
snake_case__ = []
snake_case__ = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
snake_case__ = min(start_index + batch_size , len(_a ) )
snake_case__ = encoded_texts[start_index:end_index]
snake_case__ = attn_masks[start_index:end_index]
if add_start_token:
snake_case__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
snake_case__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
snake_case__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 )
snake_case__ = encoded_batch
with torch.no_grad():
snake_case__ = model(_a , attention_mask=_a ).logits
snake_case__ = out_logits[..., :-1, :].contiguous()
snake_case__ = labels[..., 1:].contiguous()
snake_case__ = attn_mask[..., 1:].contiguous()
snake_case__ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 33 | 1 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> str:
snake_case__ , snake_case__ = [], []
while len(__lowerCAmelCase ) > 1:
snake_case__ , snake_case__ = min(__lowerCAmelCase ), max(__lowerCAmelCase )
start.append(__lowerCAmelCase )
end.append(__lowerCAmelCase )
collection.remove(__lowerCAmelCase )
collection.remove(__lowerCAmelCase )
end.reverse()
return start + collection + end
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ : int = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 33 |
import os
from datetime import datetime as dt
from github import Github
lowerCamelCase__ : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
snake_case__ = Github(os.environ['''GITHUB_TOKEN'''] )
snake_case__ = g.get_repo('''huggingface/diffusers''' )
snake_case__ = repo.get_issues(state='''open''' )
for issue in open_issues:
snake_case__ = sorted(issue.get_comments() , key=lambda __lowerCAmelCase : i.created_at , reverse=__lowerCAmelCase )
snake_case__ = comments[0] if len(__lowerCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 33 | 1 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class __magic_name__ :
'''simple docstring'''
def __init__( self:Union[str, Any] , _a:List[str] ):
snake_case__ = data
snake_case__ = [0X67_452_301, 0XEF_CDA_B89, 0X98_BAD_CFE, 0X10_325_476, 0XC3_D2E_1F0]
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:Dict , _a:List[str] ):
return ((n << b) | (n >> (32 - b))) & 0XFF_FFF_FFF
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
snake_case__ = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Optional[Any] ):
snake_case__ = list(struct.unpack('''>16L''' , _a ) ) + [0] * 64
for i in range(16 , 80 ):
snake_case__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.padding()
snake_case__ = self.split_blocks()
for block in self.blocks:
snake_case__ = self.expand_block(_a )
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
snake_case__ = (b & c) | ((~b) & d)
snake_case__ = 0X5A_827_999
elif 20 <= i < 40:
snake_case__ = b ^ c ^ d
snake_case__ = 0X6E_D9E_BA1
elif 40 <= i < 60:
snake_case__ = (b & c) | (b & d) | (c & d)
snake_case__ = 0X8F_1BB_CDC
elif 60 <= i < 80:
snake_case__ = b ^ c ^ d
snake_case__ = 0XCA_62C_1D6
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = (
self.rotate(_a , 5 ) + f + e + k + expanded_block[i] & 0XFF_FFF_FFF,
a,
self.rotate(_a , 30 ),
c,
d,
)
snake_case__ = (
self.h[0] + a & 0XFF_FFF_FFF,
self.h[1] + b & 0XFF_FFF_FFF,
self.h[2] + c & 0XFF_FFF_FFF,
self.h[3] + d & 0XFF_FFF_FFF,
self.h[4] + e & 0XFF_FFF_FFF,
)
return ("{:08x}" * 5).format(*self.h )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
snake_case__ = B'''Test String'''
assert SHAaHash(__lowerCAmelCase ).final_hash() == hashlib.shaa(__lowerCAmelCase ).hexdigest() # noqa: S324
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
snake_case__ = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
snake_case__ = parser.parse_args()
snake_case__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
snake_case__ = f.read()
else:
snake_case__ = bytes(__lowerCAmelCase , '''utf-8''' )
print(SHAaHash(__lowerCAmelCase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 33 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(__lowerCAmelCase , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
snake_case__ = _distribute_shards(**__lowerCAmelCase )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = _split_gen_kwargs(__lowerCAmelCase , __lowerCAmelCase )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
if expected is RuntimeError:
with pytest.raises(__lowerCAmelCase ):
_number_of_shards_in_gen_kwargs(__lowerCAmelCase )
else:
snake_case__ = _number_of_shards_in_gen_kwargs(__lowerCAmelCase )
assert out == expected
| 33 | 1 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase__ : int = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
lowerCamelCase__ : Optional[int] = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
lowerCamelCase__ : List[str] = {
"""vinai/phobert-base""": 2_5_6,
"""vinai/phobert-large""": 2_5_6,
}
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Dict:
snake_case__ = set()
snake_case__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ = char
snake_case__ = set(__lowerCAmelCase )
return pairs
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Optional[int] = VOCAB_FILES_NAMES
__lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self:Union[str, Any] , _a:Optional[int] , _a:Tuple , _a:Optional[Any]="<s>" , _a:Optional[int]="</s>" , _a:Tuple="</s>" , _a:Tuple="<s>" , _a:List[Any]="<unk>" , _a:Tuple="<pad>" , _a:Dict="<mask>" , **_a:Tuple , ):
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , **_a , )
snake_case__ = vocab_file
snake_case__ = merges_file
snake_case__ = {}
snake_case__ = 0
snake_case__ = 1
snake_case__ = 2
snake_case__ = 3
self.add_from_file(_a )
snake_case__ = {v: k for k, v in self.encoder.items()}
with open(_a , encoding='''utf-8''' ) as merges_handle:
snake_case__ = merges_handle.read().split('''\n''' )[:-1]
snake_case__ = [tuple(merge.split()[:-1] ) for merge in merges]
snake_case__ = dict(zip(_a , range(len(_a ) ) ) )
snake_case__ = {}
def SCREAMING_SNAKE_CASE__ ( self:int , _a:List[int] , _a:Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ = [self.cls_token_id]
snake_case__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[int] , _a:Optional[List[int]] = None , _a:bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def SCREAMING_SNAKE_CASE__ ( self:str , _a:List[int] , _a:Optional[List[int]] = None ):
snake_case__ = [self.sep_token_id]
snake_case__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return len(self.encoder )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:str ):
if token in self.cache:
return self.cache[token]
snake_case__ = tuple(_a )
snake_case__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
snake_case__ = get_pairs(_a )
if not pairs:
return token
while True:
snake_case__ = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__ , snake_case__ = bigram
snake_case__ = []
snake_case__ = 0
while i < len(_a ):
try:
snake_case__ = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ = tuple(_a )
snake_case__ = new_word
if len(_a ) == 1:
break
else:
snake_case__ = get_pairs(_a )
snake_case__ = '''@@ '''.join(_a )
snake_case__ = word[:-4]
snake_case__ = word
return word
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[str] ):
snake_case__ = []
snake_case__ = re.findall(r'''\S+\n?''' , _a )
for token in words:
split_tokens.extend(list(self.bpe(_a ).split(''' ''' ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Dict ):
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:Union[str, Any] ):
return self.decoder.get(_a , self.unk_token )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:str ):
snake_case__ = ''' '''.join(_a ).replace('''@@ ''' , '''''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:str , _a:Optional[str] = None ):
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
if os.path.abspath(self.merges_file ) != os.path.abspath(_a ):
copyfile(self.merges_file , _a )
return out_vocab_file, out_merge_file
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:str ):
if isinstance(_a , _a ):
try:
with open(_a , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(_a )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
snake_case__ = f.readlines()
for lineTmp in lines:
snake_case__ = lineTmp.strip()
snake_case__ = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
snake_case__ = line[:idx]
snake_case__ = len(self.encoder )
| 33 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : str = IFImgaImgSuperResolutionPipeline
__lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
__lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
__lowercase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:Optional[Any]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
snake_case__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 33 | 1 |
from collections import deque
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Union[str, Any]:
snake_case__ = len(__lowerCAmelCase )
snake_case__ = deque()
snake_case__ = [False for _ in range(__lowerCAmelCase )]
snake_case__ = [-1 for _ in range(__lowerCAmelCase )]
snake_case__ = index_of[:]
def strong_connect(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
snake_case__ = index # the number when this node is seen
snake_case__ = index # lowest rank node reachable from here
index += 1
stack.append(__lowerCAmelCase )
snake_case__ = True
for w in g[v]:
if index_of[w] == -1:
snake_case__ = strong_connect(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
snake_case__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
snake_case__ = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
snake_case__ = []
snake_case__ = stack.pop()
snake_case__ = False
component.append(__lowerCAmelCase )
while w != v:
snake_case__ = stack.pop()
snake_case__ = False
component.append(__lowerCAmelCase )
components.append(__lowerCAmelCase )
return index
snake_case__ = []
for v in range(__lowerCAmelCase ):
if index_of[v] == -1:
strong_connect(__lowerCAmelCase , 0 , __lowerCAmelCase )
return components
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
snake_case__ = [[] for _ in range(__lowerCAmelCase )]
for u, v in edges:
g[u].append(__lowerCAmelCase )
return g
if __name__ == "__main__":
# Test
lowerCamelCase__ : Tuple = 7
lowerCamelCase__ : Optional[Any] = [0, 0, 1, 2, 3, 3, 4, 4, 6]
lowerCamelCase__ : Optional[int] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
lowerCamelCase__ : int = [(u, v) for u, v in zip(source, target)]
lowerCamelCase__ : List[str] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 33 |
import math
class __magic_name__ :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[list[float]] , _a:list[int] ):
snake_case__ = 0.0
snake_case__ = 0.0
for i in range(len(_a ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:list[list[int | float]] , _a:list[int] , _a:int , _a:float ):
for i in range(len(_a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def SCREAMING_SNAKE_CASE ( ) -> None:
# Training Examples ( m, n )
snake_case__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
snake_case__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
snake_case__ = SelfOrganizingMap()
snake_case__ = 3
snake_case__ = 0.5
for _ in range(__lowerCAmelCase ):
for j in range(len(__lowerCAmelCase ) ):
# training sample
snake_case__ = training_samples[j]
# Compute the winning vector
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# Update the winning vector
snake_case__ = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# classify test sample
snake_case__ = [0, 0, 0, 1]
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# results
print(F"""Clusters that the test sample belongs to : {winner}""" )
print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 33 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
for param in module.parameters():
snake_case__ = False
def SCREAMING_SNAKE_CASE ( ) -> int:
snake_case__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
snake_case__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
snake_case__ = plt.imshow(__lowerCAmelCase )
fig.axes.get_xaxis().set_visible(__lowerCAmelCase )
fig.axes.get_yaxis().set_visible(__lowerCAmelCase )
plt.show()
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
snake_case__ = datetime.now()
snake_case__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 33 |
from __future__ import annotations
from statistics import mean
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
snake_case__ = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i]
snake_case__ = []
snake_case__ = 0
snake_case__ = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
snake_case__ = []
snake_case__ = -1
for i in range(__lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
snake_case__ = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
snake_case__ = i
total_time += burst_time[target_process]
completed += 1
snake_case__ = 0
snake_case__ = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
lowerCamelCase__ : Tuple = 4
lowerCamelCase__ : Union[str, Any] = [2, 5, 3, 7]
lowerCamelCase__ : Optional[Any] = [0, 0, 0, 0]
lowerCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 33 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = (EulerDiscreteScheduler,)
__lowercase : str = 10
def SCREAMING_SNAKE_CASE__ ( self:Tuple , **_a:str ):
snake_case__ = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_a )
return config
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ = scheduler.scale_model_input(_a , _a )
snake_case__ = model(_a , _a )
snake_case__ = scheduler.step(_a , _a , _a , generator=_a )
snake_case__ = output.prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config(prediction_type='''v_prediction''' )
snake_case__ = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ = scheduler.scale_model_input(_a , _a )
snake_case__ = model(_a , _a )
snake_case__ = scheduler.step(_a , _a , _a , generator=_a )
snake_case__ = output.prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ = sample.to(_a )
for t in scheduler.timesteps:
snake_case__ = scheduler.scale_model_input(_a , _a )
snake_case__ = model(_a , _a )
snake_case__ = scheduler.step(_a , _a , _a , generator=_a )
snake_case__ = output.prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a , use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
snake_case__ = sample.to(_a )
for t in scheduler.timesteps:
snake_case__ = scheduler.scale_model_input(_a , _a )
snake_case__ = model(_a , _a )
snake_case__ = scheduler.step(_a , _a , _a , generator=_a )
snake_case__ = output.prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
| 33 |
lowerCamelCase__ : List[str] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
snake_case__ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
snake_case__ = Stack()
snake_case__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__lowerCAmelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(__lowerCAmelCase )
elif i == ")":
# RULE 4
snake_case__ = operator_stack.peek()
operator_stack.pop()
snake_case__ = operand_stack.peek()
operand_stack.pop()
snake_case__ = operand_stack.peek()
operand_stack.pop()
snake_case__ = operators[opr](__lowerCAmelCase , __lowerCAmelCase )
operand_stack.push(__lowerCAmelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 33 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
'''simple docstring'''
def __init__( self:Optional[Any] , _a:int , _a:str=3 , _a:Optional[int]=32 , _a:Optional[Any]=3 , _a:Tuple=10 , _a:List[Any]=[8, 16, 32, 64] , _a:str=[1, 1, 2, 1] , _a:Any=True , _a:List[Any]=True , _a:List[str]="relu" , _a:int=3 , _a:Tuple=None , _a:Tuple=["stage2", "stage3", "stage4"] , _a:List[Any]=[2, 3, 4] , _a:Union[str, Any]=1 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = num_channels
snake_case__ = embeddings_size
snake_case__ = hidden_sizes
snake_case__ = depths
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = hidden_act
snake_case__ = num_labels
snake_case__ = scope
snake_case__ = len(_a )
snake_case__ = out_features
snake_case__ = out_indices
snake_case__ = num_groups
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Optional[int] , _a:Tuple , _a:int ):
snake_case__ = BitModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Tuple , _a:Any , _a:Union[str, Any] ):
snake_case__ = self.num_labels
snake_case__ = BitForImageClassification(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:str , _a:List[str] , _a:Any ):
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case__ = None
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__lowercase : int = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
__lowercase : Tuple = False
__lowercase : Optional[Any] = False
__lowercase : str = False
__lowercase : Tuple = False
__lowercase : Tuple = False
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = BitModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
def check_hidden_states_output(_a:List[Any] , _a:int , _a:Union[str, Any] ):
snake_case__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
snake_case__ = model(**self._prepare_for_class(_a , _a ) )
snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case__ = layer_type
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( ) -> Any:
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
# verify the logits
snake_case__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _a )
snake_case__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
__lowercase : int = BitConfig
__lowercase : Any = False
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = BitModelTester(self )
| 33 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCamelCase__ : int = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:List[Any] , *_a:Dict , **_a:Tuple ):
warnings.warn(
'''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PerceiverImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 33 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
def get_masked_lm_array(__lowerCAmelCase ):
snake_case__ = F"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
snake_case__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
if "kernel" in name:
snake_case__ = array.transpose()
return torch.from_numpy(__lowerCAmelCase )
def get_encoder_array(__lowerCAmelCase ):
snake_case__ = F"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
snake_case__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
if "kernel" in name:
snake_case__ = array.transpose()
return torch.from_numpy(__lowerCAmelCase )
def get_encoder_layer_array(__lowerCAmelCase , __lowerCAmelCase ):
snake_case__ = F"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
snake_case__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
if "kernel" in name:
snake_case__ = array.transpose()
return torch.from_numpy(__lowerCAmelCase )
def get_encoder_attention_layer_array(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
snake_case__ = F"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
snake_case__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
snake_case__ = array.reshape(__lowerCAmelCase )
if "kernel" in name:
snake_case__ = array.transpose()
return torch.from_numpy(__lowerCAmelCase )
print(F"""Loading model based on config from {config_path}...""" )
snake_case__ = BertConfig.from_json_file(__lowerCAmelCase )
snake_case__ = BertForMaskedLM(__lowerCAmelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
snake_case__ = model.bert.encoder.layer[layer_index]
# Self-attention
snake_case__ = layer.attention.self
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
snake_case__ = layer.attention.output
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
snake_case__ = get_encoder_attention_layer_array(
__lowerCAmelCase , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_attention_layer_norm/gamma''' )
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_attention_layer_norm/beta''' )
# Intermediate
snake_case__ = layer.intermediate
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_intermediate_dense/kernel''' )
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_intermediate_dense/bias''' )
# Output
snake_case__ = layer.output
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_output_dense/kernel''' )
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_output_dense/bias''' )
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_output_layer_norm/gamma''' )
snake_case__ = get_encoder_layer_array(__lowerCAmelCase , '''_output_layer_norm/beta''' )
# Embeddings
snake_case__ = get_encoder_array('''_position_embedding_layer/embeddings''' )
snake_case__ = get_encoder_array('''_type_embedding_layer/embeddings''' )
snake_case__ = get_encoder_array('''_embedding_norm_layer/gamma''' )
snake_case__ = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
snake_case__ = model.cls.predictions.transform
snake_case__ = get_masked_lm_array('''dense/kernel''' )
snake_case__ = get_masked_lm_array('''dense/bias''' )
snake_case__ = get_masked_lm_array('''layer_norm/gamma''' )
snake_case__ = get_masked_lm_array('''layer_norm/beta''' )
snake_case__ = get_masked_lm_array('''embedding_table''' )
# Pooling
snake_case__ = BertPooler(config=__lowerCAmelCase )
snake_case__ = get_encoder_array('''_pooler_layer/kernel''' )
snake_case__ = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(__lowerCAmelCase )
# Integration test - should load without any errors ;)
snake_case__ = BertForMaskedLM.from_pretrained(__lowerCAmelCase )
print(new_model.eval() )
print('''Model conversion was done sucessfully!''' )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
lowerCamelCase__ : int = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 33 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Tuple = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:Dict , *_a:Union[str, Any] , **_a:List[Any] ):
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 33 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
snake_case__ = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Dict = StableDiffusionLatentUpscalePipeline
__lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
__lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
__lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowercase : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowercase : List[Any] = frozenset([] )
__lowercase : Any = True
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = 1
snake_case__ = 4
snake_case__ = (16, 16)
snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
torch.manual_seed(0 )
snake_case__ = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
snake_case__ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' )
snake_case__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
snake_case__ = CLIPTextModel(_a )
snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case__ = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:List[str]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''cpu'''
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
snake_case__ = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = 2
snake_case__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
snake_case__ = getattr(_a , scheduler_enum.name )
snake_case__ = scheduler_cls.from_config(pipe.scheduler.config )
snake_case__ = pipe(**_a )[0]
outputs.append(_a )
assert check_same_shape(_a )
@require_torch_gpu
@slow
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
snake_case__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 33 | 1 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> bool:
snake_case__ = len(__lowerCAmelCase ) + 1
snake_case__ = len(__lowerCAmelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
snake_case__ = [[0 for i in range(__lowerCAmelCase )] for j in range(__lowerCAmelCase )]
# since string of zero length match pattern of zero length
snake_case__ = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __lowerCAmelCase ):
snake_case__ = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __lowerCAmelCase ):
snake_case__ = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __lowerCAmelCase ):
for j in range(1 , __lowerCAmelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
snake_case__ = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
snake_case__ = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
snake_case__ = dp[i - 1][j]
else:
snake_case__ = 0
else:
snake_case__ = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
lowerCamelCase__ : int = """aab"""
lowerCamelCase__ : List[Any] = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 33 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''ZinengTang/tvlt-base'''
snake_case__ = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:List[Any] ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , **_a:Tuple ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
snake_case__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _a )
self.assertIsInstance(processor.image_processor , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([1_20_00] )
snake_case__ = feature_extractor(_a , return_tensors='''np''' )
snake_case__ = processor(audio=_a , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = image_processor(_a , return_tensors='''np''' )
snake_case__ = processor(images=_a , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([1_20_00] )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = processor(audio=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 33 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __magic_name__ :
'''simple docstring'''
def __init__( self:int , _a:Optional[Any] , _a:Any=2 , _a:Dict=True , _a:List[Any]=False , _a:List[str]=10 , _a:Union[str, Any]=3 , _a:Tuple=32 * 8 , _a:Dict=32 * 8 , _a:List[str]=4 , _a:Union[str, Any]=64 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = is_training
snake_case__ = use_auxiliary_loss
snake_case__ = num_queries
snake_case__ = num_channels
snake_case__ = min_size
snake_case__ = max_size
snake_case__ = num_labels
snake_case__ = hidden_dim
snake_case__ = hidden_dim
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
snake_case__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_a )
snake_case__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_a ) > 0.5
).float()
snake_case__ = (torch.rand((self.batch_size, self.num_labels) , device=_a ) > 0.5).long()
snake_case__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
snake_case__ = self.num_queries
snake_case__ = self.num_labels
snake_case__ = [1, 1, 1, 1]
snake_case__ = self.num_channels
snake_case__ = 64
snake_case__ = 1_28
snake_case__ = self.hidden_dim
snake_case__ = self.hidden_dim
snake_case__ = self.hidden_dim
return config
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = self.prepare_config_and_inputs()
snake_case__ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:str ):
snake_case__ = output.encoder_hidden_states
snake_case__ = output.pixel_decoder_hidden_states
snake_case__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , config.decoder_layers )
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Optional[Any] , _a:Optional[int] , _a:List[str] , _a:Tuple=False ):
with torch.no_grad():
snake_case__ = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(pixel_values=_a , pixel_mask=_a )
snake_case__ = model(_a , output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:str , _a:Dict , _a:Optional[Any] , _a:str , _a:str ):
snake_case__ = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a:Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case__ = model(pixel_values=_a , pixel_mask=_a )
snake_case__ = model(_a )
comm_check_on_output(_a )
snake_case__ = model(
pixel_values=_a , pixel_mask=_a , mask_labels=_a , class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__lowercase : int = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
__lowercase : List[Any] = False
__lowercase : str = False
__lowercase : Union[str, Any] = False
__lowercase : int = False
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = MaskaFormerModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def SCREAMING_SNAKE_CASE__ ( self:int ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
@slow
def SCREAMING_SNAKE_CASE__ ( self:int ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
snake_case__ = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = (self.model_tester.min_size,) * 2
snake_case__ = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_a ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_a ),
'''class_labels''': torch.zeros(2 , 10 , device=_a ).long(),
}
snake_case__ = self.model_tester.get_config()
snake_case__ = MaskaFormerForUniversalSegmentation(_a ).to(_a )
snake_case__ = model(**_a )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a ).to(_a )
snake_case__ = model(**_a , output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
if not self.model_tester.is_training:
return
snake_case__ = self.all_model_classes[1]
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs()
snake_case__ = model_class(_a )
model.to(_a )
model.train()
snake_case__ = model(_a , mask_labels=_a , class_labels=_a ).loss
loss.backward()
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.all_model_classes[1]
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs()
snake_case__ = True
snake_case__ = True
snake_case__ = model_class(_a ).to(_a )
model.train()
snake_case__ = model(_a , mask_labels=_a , class_labels=_a )
snake_case__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
snake_case__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ : Tuple = 1E-4
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:int ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(_a , return_tensors='''pt''' ).to(_a )
snake_case__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 3_84, 3_84) )
with torch.no_grad():
snake_case__ = model(**_a )
snake_case__ = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
snake_case__ = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
snake_case__ = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _a , atol=_a ) )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(_a , return_tensors='''pt''' ).to(_a )
snake_case__ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 3_84, 3_84) )
with torch.no_grad():
snake_case__ = model(**_a )
# masks_queries_logits
snake_case__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
snake_case__ = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
snake_case__ = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
snake_case__ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
snake_case__ = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
snake_case__ = self.default_image_processor
snake_case__ = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
snake_case__ = inputs['''pixel_values'''].to(_a )
snake_case__ = [el.to(_a ) for el in inputs['''mask_labels''']]
snake_case__ = [el.to(_a ) for el in inputs['''class_labels''']]
with torch.no_grad():
snake_case__ = model(**_a )
self.assertTrue(outputs.loss is not None )
| 33 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Optional[int] = 'data2vec-vision'
def __init__( self:int , _a:Tuple=7_68 , _a:int=12 , _a:Any=12 , _a:Optional[int]=30_72 , _a:Optional[int]="gelu" , _a:Any=0.0 , _a:Any=0.0 , _a:List[str]=0.02 , _a:Dict=1e-12 , _a:Tuple=2_24 , _a:Any=16 , _a:str=3 , _a:str=False , _a:Union[str, Any]=False , _a:Optional[int]=False , _a:Any=False , _a:Dict=0.1 , _a:Dict=0.1 , _a:str=True , _a:str=[3, 5, 7, 11] , _a:List[str]=[1, 2, 3, 6] , _a:List[str]=True , _a:Any=0.4 , _a:str=2_56 , _a:Union[str, Any]=1 , _a:int=False , _a:Optional[int]=2_55 , **_a:Dict , ):
super().__init__(**_a )
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = use_mask_token
snake_case__ = use_absolute_position_embeddings
snake_case__ = use_relative_position_bias
snake_case__ = use_shared_relative_position_bias
snake_case__ = layer_scale_init_value
snake_case__ = drop_path_rate
snake_case__ = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case__ = out_indices
snake_case__ = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case__ = use_auxiliary_head
snake_case__ = auxiliary_loss_weight
snake_case__ = auxiliary_channels
snake_case__ = auxiliary_num_convs
snake_case__ = auxiliary_concat_input
snake_case__ = semantic_loss_ignore_index
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Any = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return 1e-4
| 33 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : List[Any] = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 |
import os
import sys
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : Optional[int] = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
return AutoConfig.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
return AutoTokenizer.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Tuple:
return AutoModel.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
return AutoModelForCausalLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[Any]:
return AutoModelForMaskedLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
return AutoModelForSequenceClassification.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
return AutoModelForQuestionAnswering.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
| 33 | 1 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase__ : str = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCamelCase__ : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowerCamelCase__ : int = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
snake_case__ = True
# Deal with multi-line cases
elif (
re.search(
rF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __lowerCAmelCase , )
is not None
):
snake_case__ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
snake_case__ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
snake_case__ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
snake_case__ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
snake_case__ = True
if not attribute_used:
snake_case__ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
snake_case__ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
snake_case__ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
snake_case__ = True
elif attribute.endswith('''_token_id''' ):
snake_case__ = True
# configuration class specific cases
if not case_allowed:
snake_case__ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
snake_case__ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[str]:
snake_case__ = dict(inspect.signature(config_class.__init__ ).parameters )
snake_case__ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
snake_case__ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
snake_case__ = {}
if len(config_class.attribute_map ) > 0:
snake_case__ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
snake_case__ = inspect.getsourcefile(__lowerCAmelCase )
snake_case__ = os.path.dirname(__lowerCAmelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
snake_case__ = [os.path.join(__lowerCAmelCase , __lowerCAmelCase ) for fn in os.listdir(__lowerCAmelCase ) if fn.startswith('''modeling_''' )]
# Get the source code strings
snake_case__ = []
for path in modeling_paths:
if os.path.isfile(__lowerCAmelCase ):
with open(__lowerCAmelCase ) as fp:
modeling_sources.append(fp.read() )
snake_case__ = []
for config_param, default_value in zip(__lowerCAmelCase , __lowerCAmelCase ):
# `attributes` here is all the variant names for `config_param`
snake_case__ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
unused_attributes.append(attributes[0] )
return sorted(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> int:
snake_case__ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
snake_case__ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __lowerCAmelCase : inspect.isclass(__lowerCAmelCase )
and issubclass(__lowerCAmelCase , __lowerCAmelCase )
and inspect.getmodule(__lowerCAmelCase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
snake_case__ = check_config_attributes_being_used(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
snake_case__ = unused_attributes
if len(__lowerCAmelCase ) > 0:
snake_case__ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__lowerCAmelCase )
if __name__ == "__main__":
check_config_attributes()
| 33 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : str = (CMStochasticIterativeScheduler,)
__lowercase : List[str] = 10
def SCREAMING_SNAKE_CASE__ ( self:int , **_a:Optional[int] ):
snake_case__ = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**_a )
return config
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = 10
snake_case__ = self.get_scheduler_config()
snake_case__ = self.scheduler_classes[0](**_a )
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps[0]
snake_case__ = scheduler.timesteps[1]
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_a )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = 1
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_a ):
# 1. scale model input
snake_case__ = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
snake_case__ = model(_a , _a )
# 3. predict previous sample x_t-1
snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
snake_case__ = pred_prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [1_06, 0]
scheduler.set_timesteps(timesteps=_a )
snake_case__ = scheduler.timesteps
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
snake_case__ = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
snake_case__ = model(_a , _a )
# 3. predict previous sample x_t-1
snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
snake_case__ = pred_prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [39, 30, 12, 15, 0]
with self.assertRaises(_a , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [39, 30, 12, 1, 0]
snake_case__ = len(_a )
with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=_a )
| 33 | 1 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCamelCase__ : int = getLogger(__name__)
lowerCamelCase__ : Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu"""
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 8 , __lowerCAmelCase = DEFAULT_DEVICE , __lowerCAmelCase=False , __lowerCAmelCase="summarization" , __lowerCAmelCase=None , **__lowerCAmelCase , ) -> Dict:
snake_case__ = Path(__lowerCAmelCase ).open('''w''' , encoding='''utf-8''' )
snake_case__ = str(__lowerCAmelCase )
snake_case__ = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ).to(__lowerCAmelCase )
if fpaa:
snake_case__ = model.half()
snake_case__ = AutoTokenizer.from_pretrained(__lowerCAmelCase )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
snake_case__ = time.time()
# update config with task specific params
use_task_specific_params(__lowerCAmelCase , __lowerCAmelCase )
if prefix is None:
snake_case__ = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(__lowerCAmelCase , __lowerCAmelCase ) ) ):
snake_case__ = [prefix + text for text in examples_chunk]
snake_case__ = tokenizer(__lowerCAmelCase , return_tensors='''pt''' , truncation=__lowerCAmelCase , padding='''longest''' ).to(__lowerCAmelCase )
snake_case__ = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **__lowerCAmelCase , )
snake_case__ = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
snake_case__ = int(time.time() - start_time ) # seconds
snake_case__ = len(__lowerCAmelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase=True ) -> str:
snake_case__ = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=__lowerCAmelCase , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=__lowerCAmelCase , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=__lowerCAmelCase , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=__lowerCAmelCase , required=__lowerCAmelCase , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=__lowerCAmelCase , required=__lowerCAmelCase , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=__lowerCAmelCase , required=__lowerCAmelCase , default=__lowerCAmelCase , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=__lowerCAmelCase , required=__lowerCAmelCase , default=__lowerCAmelCase , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--task''' , type=__lowerCAmelCase , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=__lowerCAmelCase , default=8 , required=__lowerCAmelCase , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=__lowerCAmelCase , default=-1 , required=__lowerCAmelCase , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=__lowerCAmelCase , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
snake_case__ , snake_case__ = parser.parse_known_args()
snake_case__ = parse_numeric_n_bool_cl_kwargs(__lowerCAmelCase )
if parsed_args and verbose:
print(F"""parsed the following generate kwargs: {parsed_args}""" )
snake_case__ = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
snake_case__ = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=__lowerCAmelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
snake_case__ = generate_summaries_or_translations(
__lowerCAmelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **__lowerCAmelCase , )
if args.reference_path is None:
return {}
# Compute scores
snake_case__ = calculate_bleu if '''translation''' in args.task else calculate_rouge
snake_case__ = [x.rstrip() for x in open(args.save_path ).readlines()]
snake_case__ = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(__lowerCAmelCase )]
snake_case__ = score_fn(__lowerCAmelCase , __lowerCAmelCase )
scores.update(__lowerCAmelCase )
if args.dump_args:
scores.update(__lowerCAmelCase )
if args.info:
snake_case__ = args.info
if verbose:
print(__lowerCAmelCase )
if args.score_path is not None:
json.dump(__lowerCAmelCase , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 33 |
import numpy as np
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray:
return vector * sigmoid(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 1 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCamelCase__ : Tuple = """hf-internal-testing/tiny-random-bert"""
lowerCamelCase__ : Tuple = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
lowerCamelCase__ : Optional[int] = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = cached_file(_a , _a )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_a ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_a , _a ) ) )
with open(os.path.join(_a , '''refs''' , '''main''' ) ) as f:
snake_case__ = f.read()
self.assertEqual(_a , os.path.join(_a , '''snapshots''' , _a , _a ) )
self.assertTrue(os.path.isfile(_a ) )
# File is cached at the same place the second time.
snake_case__ = cached_file(_a , _a )
self.assertEqual(_a , _a )
# Using a specific revision to test the full commit hash.
snake_case__ = cached_file(_a , _a , revision='''9b8c223''' )
self.assertEqual(_a , os.path.join(_a , '''snapshots''' , _a , _a ) )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
with self.assertRaisesRegex(_a , '''is not a valid model identifier''' ):
snake_case__ = cached_file('''tiny-random-bert''' , _a )
with self.assertRaisesRegex(_a , '''is not a valid git identifier''' ):
snake_case__ = cached_file(_a , _a , revision='''aaaa''' )
with self.assertRaisesRegex(_a , '''does not appear to have a file named''' ):
snake_case__ = cached_file(_a , '''conf''' )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
with self.assertRaisesRegex(_a , '''does not appear to have a file named''' ):
snake_case__ = cached_file(_a , '''conf''' )
with open(os.path.join(_a , '''refs''' , '''main''' ) ) as f:
snake_case__ = f.read()
self.assertTrue(os.path.isfile(os.path.join(_a , '''.no_exist''' , _a , '''conf''' ) ) )
snake_case__ = cached_file(_a , '''conf''' , _raise_exceptions_for_missing_entries=_a )
self.assertIsNone(_a )
snake_case__ = cached_file(_a , '''conf''' , local_files_only=_a , _raise_exceptions_for_missing_entries=_a )
self.assertIsNone(_a )
snake_case__ = mock.Mock()
snake_case__ = 5_00
snake_case__ = {}
snake_case__ = HTTPError
snake_case__ = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=_a ) as mock_head:
snake_case__ = cached_file(_a , '''conf''' , _raise_exceptions_for_connection_errors=_a )
self.assertIsNone(_a )
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE__ ( self:Any ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _a ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _a ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _a ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_a , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , _a )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_a , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , _a , revision='''ahaha''' )
snake_case__ = get_file_from_repo('''bert-base-cased''' , _a )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case__ = json.loads(open(_a , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 7_68 )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ = Path(_a ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(_a , '''a.txt''' ) , str(_a ) )
self.assertIsNone(get_file_from_repo(_a , '''b.txt''' ) )
| 33 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100 ) -> int:
snake_case__ = set()
snake_case__ = 0
snake_case__ = n + 1 # maximum limit
for a in range(2 , __lowerCAmelCase ):
for b in range(2 , __lowerCAmelCase ):
snake_case__ = a**b # calculates the current power
collect_powers.add(__lowerCAmelCase ) # adds the result to the set
return len(__lowerCAmelCase )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 33 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : str = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Any = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 |
from copy import deepcopy
class __magic_name__ :
'''simple docstring'''
def __init__( self:int , _a:list[int] | None = None , _a:int | None = None ):
if arr is None and size is not None:
snake_case__ = size
snake_case__ = [0] * size
elif arr is not None:
self.init(_a )
else:
raise ValueError('''Either arr or size must be specified''' )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:list[int] ):
snake_case__ = len(_a )
snake_case__ = deepcopy(_a )
for i in range(1 , self.size ):
snake_case__ = self.next_(_a )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case__ = self.next_(_a )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:int ):
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:int ):
return index - (index & (-index))
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:int ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case__ = self.next_(_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
self.add(_a , value - self.get(_a ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ):
if right == 0:
return 0
snake_case__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case__ = self.prev(_a )
return result
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
return self.prefix(_a ) - self.prefix(_a )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
return self.query(_a , index + 1 )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
value -= self.tree[0]
if value < 0:
return -1
snake_case__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case__ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {
"""facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : int = 'wav2vec2'
def __init__( self:Tuple , _a:List[Any]=32 , _a:int=7_68 , _a:Dict=12 , _a:Tuple=12 , _a:Optional[Any]=30_72 , _a:List[str]="gelu" , _a:List[Any]=0.1 , _a:List[Any]=0.1 , _a:List[Any]=0.1 , _a:List[Any]=0.0 , _a:Union[str, Any]=0.0 , _a:List[str]=0.1 , _a:Optional[int]=0.1 , _a:Optional[Any]=0.02 , _a:Tuple=1e-5 , _a:Optional[Any]="group" , _a:Dict="gelu" , _a:Union[str, Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _a:Optional[int]=(5, 2, 2, 2, 2, 2, 2) , _a:Dict=(10, 3, 3, 3, 3, 2, 2) , _a:Optional[int]=False , _a:List[str]=1_28 , _a:Optional[int]=16 , _a:Optional[int]=False , _a:Union[str, Any]=True , _a:Dict=0.05 , _a:Tuple=10 , _a:Optional[int]=2 , _a:str=0.0 , _a:Tuple=10 , _a:List[str]=0 , _a:List[str]=3_20 , _a:Any=2 , _a:List[Any]=0.1 , _a:List[Any]=1_00 , _a:Tuple=2_56 , _a:List[Any]=2_56 , _a:Any=0.1 , _a:Tuple="sum" , _a:Union[str, Any]=False , _a:Tuple=False , _a:Tuple=2_56 , _a:Union[str, Any]=(5_12, 5_12, 5_12, 5_12, 15_00) , _a:Union[str, Any]=(5, 3, 3, 1, 1) , _a:Dict=(1, 2, 3, 1, 1) , _a:Dict=5_12 , _a:Tuple=0 , _a:Any=1 , _a:Union[str, Any]=2 , _a:List[Any]=False , _a:Optional[int]=3 , _a:Tuple=2 , _a:Any=3 , _a:Union[str, Any]=None , _a:int=None , **_a:Optional[Any] , ):
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
snake_case__ = hidden_size
snake_case__ = feat_extract_norm
snake_case__ = feat_extract_activation
snake_case__ = list(_a )
snake_case__ = list(_a )
snake_case__ = list(_a )
snake_case__ = conv_bias
snake_case__ = num_conv_pos_embeddings
snake_case__ = num_conv_pos_embedding_groups
snake_case__ = len(self.conv_dim )
snake_case__ = num_hidden_layers
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = num_attention_heads
snake_case__ = hidden_dropout
snake_case__ = attention_dropout
snake_case__ = activation_dropout
snake_case__ = feat_proj_dropout
snake_case__ = final_dropout
snake_case__ = layerdrop
snake_case__ = layer_norm_eps
snake_case__ = initializer_range
snake_case__ = vocab_size
snake_case__ = do_stable_layer_norm
snake_case__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case__ = apply_spec_augment
snake_case__ = mask_time_prob
snake_case__ = mask_time_length
snake_case__ = mask_time_min_masks
snake_case__ = mask_feature_prob
snake_case__ = mask_feature_length
snake_case__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case__ = num_codevectors_per_group
snake_case__ = num_codevector_groups
snake_case__ = contrastive_logits_temperature
snake_case__ = feat_quantizer_dropout
snake_case__ = num_negatives
snake_case__ = codevector_dim
snake_case__ = proj_codevector_dim
snake_case__ = diversity_loss_weight
# ctc loss
snake_case__ = ctc_loss_reduction
snake_case__ = ctc_zero_infinity
# adapter
snake_case__ = add_adapter
snake_case__ = adapter_kernel_size
snake_case__ = adapter_stride
snake_case__ = num_adapter_layers
snake_case__ = output_hidden_size or hidden_size
snake_case__ = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case__ = list(_a )
snake_case__ = list(_a )
snake_case__ = list(_a )
snake_case__ = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE__ ( self:Any ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 33 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __magic_name__ :
'''simple docstring'''
__lowercase : int = BlenderbotConfig
__lowercase : Any = {}
__lowercase : Optional[Any] = 'gelu'
def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = eos_token_id
snake_case__ = pad_token_id
snake_case__ = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ):
snake_case__ = TFBlenderbotModel(config=_a ).get_decoder()
snake_case__ = inputs_dict['''input_ids''']
snake_case__ = input_ids[:1, :]
snake_case__ = inputs_dict['''attention_mask'''][:1, :]
snake_case__ = inputs_dict['''head_mask''']
snake_case__ = 1
# first forward pass
snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
snake_case__ , snake_case__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case__ = model(_a , attention_mask=_a )[0]
snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
snake_case__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple:
if attention_mask is None:
snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__lowercase : Tuple = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowercase : Any = True
__lowercase : int = False
__lowercase : int = False
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFBlenderbotModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_tokenizers
@require_tf
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.']
__lowercase : Optional[int] = 'facebook/blenderbot-400M-distill'
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' )
snake_case__ = self.model.generate(
model_inputs.input_ids , )
snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 33 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : str = {"""vocab_file""": """spiece.model"""}
lowerCamelCase__ : Optional[Any] = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase__ : str = {
"""t5-small""": 5_1_2,
"""t5-base""": 5_1_2,
"""t5-large""": 5_1_2,
"""t5-3b""": 5_1_2,
"""t5-11b""": 5_1_2,
}
lowerCamelCase__ : Union[str, Any] = """▁"""
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Optional[Any] = VOCAB_FILES_NAMES
__lowercase : str = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Dict = ['input_ids', 'attention_mask']
def __init__( self:int , _a:Any , _a:List[str]="</s>" , _a:Union[str, Any]="<unk>" , _a:List[Any]="<pad>" , _a:Optional[Any]=1_00 , _a:List[str]=None , _a:Optional[Dict[str, Any]] = None , _a:int=True , **_a:int , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case__ = [F"""<extra_id_{i}>""" for i in range(_a )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case__ = len(set(filter(lambda _a : bool('''extra_id''' in str(_a ) ) , _a ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
snake_case__ = legacy
snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_a , unk_token=_a , pad_token=_a , extra_ids=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , legacy=_a , **_a , )
snake_case__ = vocab_file
snake_case__ = extra_ids
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:Optional[int] , _a:Union[str, Any] , _a:str ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
snake_case__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _a , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:List[int] , _a:Optional[List[int]] = None , _a:bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_a )) + [1]
return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
return list(
set(filter(lambda _a : bool(re.search(r'''<extra_id_\d+>''' , _a ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
return [self._convert_token_to_id(_a ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self:int , _a:List[int] ):
if len(_a ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:List[int] , _a:Optional[List[int]] = None ):
snake_case__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[int] , _a:Optional[List[int]] = None ):
snake_case__ = self._add_eos_if_not_present(_a )
if token_ids_a is None:
return token_ids_a
else:
snake_case__ = self._add_eos_if_not_present(_a )
return token_ids_a + token_ids_a
def __getstate__( self:Dict ):
snake_case__ = self.__dict__.copy()
snake_case__ = None
return state
def __setstate__( self:Tuple , _a:Union[str, Any] ):
snake_case__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
snake_case__ = {}
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:"TextInput" , **_a:Tuple ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
snake_case__ = SPIECE_UNDERLINE + text.replace(_a , ''' ''' )
return super().tokenize(_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:Optional[int] , **_a:str ):
if not self.legacy:
snake_case__ = text.startswith(_a )
if is_first:
snake_case__ = text[1:]
snake_case__ = self.sp_model.encode(_a , out_type=_a )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_a ):
snake_case__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int ):
if token.startswith('''<extra_id_''' ):
snake_case__ = re.match(r'''<extra_id_(\d+)>''' , _a )
snake_case__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:List[Any] ):
if index < self.sp_model.get_piece_size():
snake_case__ = self.sp_model.IdToPiece(_a )
else:
snake_case__ = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:Dict ):
snake_case__ = []
snake_case__ = ''''''
snake_case__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
snake_case__ = True
snake_case__ = []
else:
current_sub_tokens.append(_a )
snake_case__ = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:str , _a:Optional[str] = None ):
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
snake_case__ = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 33 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = 0
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
snake_case__ = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
snake_case__ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
with self.assertRaisesRegex(
_a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 33 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> None:
snake_case__ = len(__lowerCAmelCase )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(__lowerCAmelCase ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , __lowerCAmelCase , __lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> None:
snake_case__ = []
depth_first_search([] , [] , [] , __lowerCAmelCase , __lowerCAmelCase )
# Print all the boards
for board in boards:
for column in board:
print(__lowerCAmelCase )
print('''''' )
print(len(__lowerCAmelCase ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 33 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> int:
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ = ''''''
else:
snake_case__ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[
: config.hidden_size, :
]
snake_case__ = in_proj_bias[: config.hidden_size]
snake_case__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
snake_case__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
snake_case__ = dct.pop(__lowerCAmelCase )
snake_case__ = val
def SCREAMING_SNAKE_CASE ( ) -> str:
snake_case__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = ViTConfig()
snake_case__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
snake_case__ = True
snake_case__ = int(vit_name[-12:-10] )
snake_case__ = int(vit_name[-9:-6] )
else:
snake_case__ = 1000
snake_case__ = '''huggingface/label-files'''
snake_case__ = '''imagenet-1k-id2label.json'''
snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = int(vit_name[-6:-4] )
snake_case__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
snake_case__ = 192
snake_case__ = 768
snake_case__ = 12
snake_case__ = 3
elif vit_name[9:].startswith('''small''' ):
snake_case__ = 384
snake_case__ = 1536
snake_case__ = 12
snake_case__ = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
snake_case__ = 768
snake_case__ = 2304
snake_case__ = 8
snake_case__ = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
snake_case__ = 1024
snake_case__ = 4096
snake_case__ = 24
snake_case__ = 16
elif vit_name[4:].startswith('''huge''' ):
snake_case__ = 1280
snake_case__ = 5120
snake_case__ = 32
snake_case__ = 16
# load original model from timm
snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ = timm_model.state_dict()
if base_model:
remove_classification_head_(__lowerCAmelCase )
snake_case__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ = ViTModel(__lowerCAmelCase ).eval()
else:
snake_case__ = ViTForImageClassification(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case__ = DeiTImageProcessor(size=config.image_size )
else:
snake_case__ = ViTImageProcessor(size=config.image_size )
snake_case__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
snake_case__ = encoding['''pixel_values''']
snake_case__ = model(__lowerCAmelCase )
if base_model:
snake_case__ = timm_model.forward_features(__lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
else:
snake_case__ = timm_model(__lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase__ : str = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 33 | 1 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
snake_case__ = os.path.abspath(__lowerCAmelCase )
logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
snake_case__ = tf.train.list_variables(__lowerCAmelCase )
snake_case__ = []
snake_case__ = []
snake_case__ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
snake_case__ = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(F"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
snake_case__ = name[1:]
# figure out how many levels deep the name is
snake_case__ = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(__lowerCAmelCase )
# read data
snake_case__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
names.append('''/'''.join(__lowerCAmelCase ) )
arrays.append(__lowerCAmelCase )
logger.info(F"""Read a total of {len(__lowerCAmelCase ):,} layers""" )
# Sanity check
if len(set(__lowerCAmelCase ) ) != 1:
raise ValueError(F"""Found layer names with different depths (layer depth {list(set(__lowerCAmelCase ) )})""" )
snake_case__ = list(set(__lowerCAmelCase ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(__lowerCAmelCase , __lowerCAmelCase ):
snake_case__ = full_name.split('''/''' )
snake_case__ = model
snake_case__ = []
for i, m_name in enumerate(__lowerCAmelCase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
snake_case__ = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
snake_case__ = getattr(__lowerCAmelCase , '''embeddings''' )
snake_case__ = getattr(__lowerCAmelCase , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
snake_case__ = getattr(__lowerCAmelCase , '''encoder''' )
snake_case__ = getattr(__lowerCAmelCase , '''layer''' )
snake_case__ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
snake_case__ = getattr(__lowerCAmelCase , '''pooler''' )
snake_case__ = getattr(__lowerCAmelCase , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
snake_case__ = getattr(__lowerCAmelCase , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
snake_case__ = getattr(__lowerCAmelCase , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
snake_case__ = getattr(__lowerCAmelCase , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
snake_case__ = getattr(__lowerCAmelCase , '''token_type_embeddings''' )
else:
raise ValueError(F"""Unknown embedding layer with name {full_name}""" )
trace.append('''weight''' )
snake_case__ = getattr(__lowerCAmelCase , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
snake_case__ = getattr(__lowerCAmelCase , '''attention''' )
snake_case__ = getattr(__lowerCAmelCase , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
snake_case__ = getattr(__lowerCAmelCase , '''attention''' )
snake_case__ = getattr(__lowerCAmelCase , '''output''' )
snake_case__ = getattr(__lowerCAmelCase , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
snake_case__ = getattr(__lowerCAmelCase , '''attention''' )
snake_case__ = getattr(__lowerCAmelCase , '''output''' )
snake_case__ = getattr(__lowerCAmelCase , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
snake_case__ = getattr(__lowerCAmelCase , '''output''' )
snake_case__ = getattr(__lowerCAmelCase , '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
snake_case__ = getattr(__lowerCAmelCase , '''output''' )
snake_case__ = getattr(__lowerCAmelCase , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
snake_case__ = getattr(__lowerCAmelCase , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
snake_case__ = getattr(__lowerCAmelCase , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
snake_case__ = getattr(__lowerCAmelCase , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
snake_case__ = getattr(__lowerCAmelCase , '''intermediate''' )
snake_case__ = getattr(__lowerCAmelCase , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
snake_case__ = getattr(__lowerCAmelCase , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
snake_case__ = getattr(__lowerCAmelCase , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
snake_case__ = getattr(__lowerCAmelCase , '''weight''' )
else:
logger.warning(F"""Ignored {m_name}""" )
# for certain layers reshape is necessary
snake_case__ = '''.'''.join(__lowerCAmelCase )
if re.match(r'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , __lowerCAmelCase ) or re.match(
r'''(\S+)\.attention\.output\.dense\.weight''' , __lowerCAmelCase ):
snake_case__ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
snake_case__ = array.transpose()
if pointer.shape == array.shape:
snake_case__ = torch.from_numpy(__lowerCAmelCase )
else:
raise ValueError(
F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
F""" {array.shape}""" )
logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
# Instantiate model
logger.info(F"""Loading model based on config from {config_path}...""" )
snake_case__ = BertConfig.from_json_file(__lowerCAmelCase )
snake_case__ = BertModel(__lowerCAmelCase )
# Load weights from checkpoint
logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict() , __lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
lowerCamelCase__ : Tuple = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 33 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = ['image_processor', 'tokenizer']
__lowercase : str = 'AutoImageProcessor'
__lowercase : Dict = 'AutoTokenizer'
def __init__( self:int , _a:List[str]=None , _a:Optional[Any]=None , **_a:List[str] ):
snake_case__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
snake_case__ = kwargs.pop('''feature_extractor''' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
snake_case__ = self.image_processor
snake_case__ = False
def __call__( self:Optional[int] , *_a:str , **_a:int ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
snake_case__ = kwargs.pop('''images''' , _a )
snake_case__ = kwargs.pop('''text''' , _a )
if len(_a ) > 0:
snake_case__ = args[0]
snake_case__ = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
snake_case__ = self.image_processor(_a , *_a , **_a )
if text is not None:
snake_case__ = self.tokenizer(_a , **_a )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Union[str, Any] , **_a:Any ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , *_a:Union[str, Any] , **_a:Optional[int] ):
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
snake_case__ = True
snake_case__ = self.tokenizer
yield
snake_case__ = self.image_processor
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Dict , _a:Dict=False , _a:Optional[int]=None ):
if added_vocab is None:
snake_case__ = self.tokenizer.get_added_vocab()
snake_case__ = {}
while tokens:
snake_case__ = re.search(r'''<s_(.*?)>''' , _a , re.IGNORECASE )
if start_token is None:
break
snake_case__ = start_token.group(1 )
snake_case__ = re.search(rF"""</s_{key}>""" , _a , re.IGNORECASE )
snake_case__ = start_token.group()
if end_token is None:
snake_case__ = tokens.replace(_a , '''''' )
else:
snake_case__ = end_token.group()
snake_case__ = re.escape(_a )
snake_case__ = re.escape(_a )
snake_case__ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _a , re.IGNORECASE )
if content is not None:
snake_case__ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
snake_case__ = self.tokenajson(_a , is_inner_value=_a , added_vocab=_a )
if value:
if len(_a ) == 1:
snake_case__ = value[0]
snake_case__ = value
else: # leaf nodes
snake_case__ = []
for leaf in content.split(r'''<sep/>''' ):
snake_case__ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
snake_case__ = leaf[1:-2] # for categorical special tokens
output[key].append(_a )
if len(output[key] ) == 1:
snake_case__ = output[key][0]
snake_case__ = tokens[tokens.find(_a ) + len(_a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_a , added_vocab=_a )
if len(_a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 33 | 1 |
import math
class __magic_name__ :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[list[float]] , _a:list[int] ):
snake_case__ = 0.0
snake_case__ = 0.0
for i in range(len(_a ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:list[list[int | float]] , _a:list[int] , _a:int , _a:float ):
for i in range(len(_a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def SCREAMING_SNAKE_CASE ( ) -> None:
# Training Examples ( m, n )
snake_case__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
snake_case__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
snake_case__ = SelfOrganizingMap()
snake_case__ = 3
snake_case__ = 0.5
for _ in range(__lowerCAmelCase ):
for j in range(len(__lowerCAmelCase ) ):
# training sample
snake_case__ = training_samples[j]
# Compute the winning vector
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# Update the winning vector
snake_case__ = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# classify test sample
snake_case__ = [0, 0, 0, 1]
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# results
print(F"""Clusters that the test sample belongs to : {winner}""" )
print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 33 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
'''simple docstring'''
def __init__( self:Optional[Any] , _a:int , _a:str=3 , _a:Optional[int]=32 , _a:Optional[Any]=3 , _a:Tuple=10 , _a:List[Any]=[8, 16, 32, 64] , _a:str=[1, 1, 2, 1] , _a:Any=True , _a:List[Any]=True , _a:List[str]="relu" , _a:int=3 , _a:Tuple=None , _a:Tuple=["stage2", "stage3", "stage4"] , _a:List[Any]=[2, 3, 4] , _a:Union[str, Any]=1 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = num_channels
snake_case__ = embeddings_size
snake_case__ = hidden_sizes
snake_case__ = depths
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = hidden_act
snake_case__ = num_labels
snake_case__ = scope
snake_case__ = len(_a )
snake_case__ = out_features
snake_case__ = out_indices
snake_case__ = num_groups
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Optional[int] , _a:Tuple , _a:int ):
snake_case__ = BitModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Tuple , _a:Any , _a:Union[str, Any] ):
snake_case__ = self.num_labels
snake_case__ = BitForImageClassification(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:str , _a:List[str] , _a:Any ):
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case__ = None
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__lowercase : int = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
__lowercase : Tuple = False
__lowercase : Optional[Any] = False
__lowercase : str = False
__lowercase : Tuple = False
__lowercase : Tuple = False
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = BitModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
def check_hidden_states_output(_a:List[Any] , _a:int , _a:Union[str, Any] ):
snake_case__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
snake_case__ = model(**self._prepare_for_class(_a , _a ) )
snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case__ = layer_type
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( ) -> Any:
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
# verify the logits
snake_case__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _a )
snake_case__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
__lowercase : int = BitConfig
__lowercase : Any = False
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = BitModelTester(self )
| 33 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = StableUnCLIPPipeline
__lowercase : Any = TEXT_TO_IMAGE_PARAMS
__lowercase : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
__lowercase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
__lowercase : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__lowercase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = 32
snake_case__ = embedder_hidden_size
# prior components
torch.manual_seed(0 )
snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
snake_case__ = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_a , projection_dim=_a , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
snake_case__ = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_a , num_layers=1 , )
torch.manual_seed(0 )
snake_case__ = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_a , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
snake_case__ = StableUnCLIPImageNormalizer(embedding_dim=_a )
snake_case__ = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
snake_case__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_a , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
snake_case__ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_a , layers_per_block=1 , upcast_attention=_a , use_linear_projection=_a , )
torch.manual_seed(0 )
snake_case__ = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_a , steps_offset=1 , )
torch.manual_seed(0 )
snake_case__ = AutoencoderKL()
snake_case__ = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Tuple , _a:Any=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_a )
@slow
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
snake_case__ = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case__ = pipe('''anime turle''' , generator=_a , output_type='''np''' )
snake_case__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
snake_case__ = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case__ = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
snake_case__ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 33 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase__ : Any = """\
"""
lowerCamelCase__ : List[str] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowerCamelCase__ : Any = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:List[Any] , _a:int = 16 , _a:bool = True , _a:Any=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
snake_case__ = '''cuda'''
else:
snake_case__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
snake_case__ = AutoModelForCausalLM.from_pretrained(_a )
snake_case__ = model.to(_a )
snake_case__ = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
snake_case__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
snake_case__ = model.config.max_length - 1
else:
snake_case__ = model.config.max_length
snake_case__ = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
snake_case__ = encodings['''input_ids''']
snake_case__ = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
snake_case__ = []
snake_case__ = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
snake_case__ = min(start_index + batch_size , len(_a ) )
snake_case__ = encoded_texts[start_index:end_index]
snake_case__ = attn_masks[start_index:end_index]
if add_start_token:
snake_case__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
snake_case__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
snake_case__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 )
snake_case__ = encoded_batch
with torch.no_grad():
snake_case__ = model(_a , attention_mask=_a ).logits
snake_case__ = out_logits[..., :-1, :].contiguous()
snake_case__ = labels[..., 1:].contiguous()
snake_case__ = attn_mask[..., 1:].contiguous()
snake_case__ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 33 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[Any] = ['input_features', 'is_longer']
def __init__( self:List[Any] , _a:Dict=64 , _a:List[str]=4_80_00 , _a:str=4_80 , _a:Tuple=10 , _a:Dict=10_24 , _a:Any=0.0 , _a:List[Any]=False , _a:float = 0 , _a:float = 1_40_00 , _a:int = None , _a:str = "fusion" , _a:str = "repeatpad" , **_a:str , ):
super().__init__(
feature_size=_a , sampling_rate=_a , padding_value=_a , return_attention_mask=_a , **_a , )
snake_case__ = top_db
snake_case__ = truncation
snake_case__ = padding
snake_case__ = fft_window_size
snake_case__ = (fft_window_size >> 1) + 1
snake_case__ = hop_length
snake_case__ = max_length_s
snake_case__ = max_length_s * sampling_rate
snake_case__ = sampling_rate
snake_case__ = frequency_min
snake_case__ = frequency_max
snake_case__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_a , min_frequency=_a , max_frequency=_a , sampling_rate=_a , norm=_a , mel_scale='''htk''' , )
snake_case__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_a , min_frequency=_a , max_frequency=_a , sampling_rate=_a , norm='''slaney''' , mel_scale='''slaney''' , )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = copy.deepcopy(self.__dict__ )
snake_case__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def SCREAMING_SNAKE_CASE__ ( self:int , _a:np.array , _a:Optional[np.array] = None ):
snake_case__ = spectrogram(
_a , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_a , log_mel='''dB''' , )
return log_mel_spectrogram.T
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:List[str] , _a:Dict , _a:int ):
snake_case__ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
snake_case__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
snake_case__ = [0]
# randomly choose index for each part
snake_case__ = np.random.choice(ranges[0] )
snake_case__ = np.random.choice(ranges[1] )
snake_case__ = np.random.choice(ranges[2] )
snake_case__ = mel[idx_front : idx_front + chunk_frames, :]
snake_case__ = mel[idx_middle : idx_middle + chunk_frames, :]
snake_case__ = mel[idx_back : idx_back + chunk_frames, :]
snake_case__ = torch.tensor(mel[None, None, :] )
snake_case__ = torch.nn.functional.interpolate(
_a , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_a )
snake_case__ = mel_shrink[0][0].numpy()
snake_case__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:np.array , _a:Dict , _a:List[str] , _a:Union[str, Any] ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
snake_case__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
snake_case__ = len(_a ) - max_length
snake_case__ = np.random.randint(0 , overflow + 1 )
snake_case__ = waveform[idx : idx + max_length]
snake_case__ = self._np_extract_fbank_features(_a , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
snake_case__ = self._np_extract_fbank_features(_a , self.mel_filters )
snake_case__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
snake_case__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
snake_case__ = np.stack([mel, mel, mel, mel] , axis=0 )
snake_case__ = False
else:
snake_case__ = self._random_mel_fusion(_a , _a , _a )
snake_case__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
snake_case__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
snake_case__ = int(max_length / len(_a ) )
snake_case__ = np.stack(np.tile(_a , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
snake_case__ = int(max_length / len(_a ) )
snake_case__ = np.stack(np.tile(_a , _a ) )
snake_case__ = np.pad(_a , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
snake_case__ = self._np_extract_fbank_features(_a , self.mel_filters )
snake_case__ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
snake_case__ = self._np_extract_fbank_features(_a , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self:Union[str, Any] , _a:Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _a:str = None , _a:Optional[str] = None , _a:Optional[int] = None , _a:Optional[int] = None , _a:Optional[Union[str, TensorType]] = None , **_a:int , ):
snake_case__ = truncation if truncation is not None else self.truncation
snake_case__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
snake_case__ = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
snake_case__ = is_batched_numpy or (
isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case__ = [np.asarray(_a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a , np.ndarray ):
snake_case__ = np.asarray(_a , dtype=np.floataa )
elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case__ = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
snake_case__ = [
self._get_input_mel(_a , max_length if max_length else self.nb_max_samples , _a , _a )
for waveform in raw_speech
]
snake_case__ = []
snake_case__ = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
snake_case__ = np.random.randint(0 , len(_a ) )
snake_case__ = True
if isinstance(input_mel[0] , _a ):
snake_case__ = [np.asarray(_a , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
snake_case__ = [[longer] for longer in is_longer]
snake_case__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
snake_case__ = BatchFeature(_a )
if return_tensors is not None:
snake_case__ = input_features.convert_to_tensors(_a )
return input_features
| 33 |
import os
from datetime import datetime as dt
from github import Github
lowerCamelCase__ : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
snake_case__ = Github(os.environ['''GITHUB_TOKEN'''] )
snake_case__ = g.get_repo('''huggingface/diffusers''' )
snake_case__ = repo.get_issues(state='''open''' )
for issue in open_issues:
snake_case__ = sorted(issue.get_comments() , key=lambda __lowerCAmelCase : i.created_at , reverse=__lowerCAmelCase )
snake_case__ = comments[0] if len(__lowerCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 33 | 1 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
snake_case__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100 ) -> int:
snake_case__ = 1
snake_case__ = 2
for i in range(2 , max_n + 1 ):
snake_case__ = pre_numerator
snake_case__ = 2 * i // 3 if i % 3 == 0 else 1
snake_case__ = cur_numerator
snake_case__ = e_cont * pre_numerator + temp
return sum_digits(__lowerCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 33 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(__lowerCAmelCase , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
snake_case__ = _distribute_shards(**__lowerCAmelCase )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = _split_gen_kwargs(__lowerCAmelCase , __lowerCAmelCase )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
if expected is RuntimeError:
with pytest.raises(__lowerCAmelCase ):
_number_of_shards_in_gen_kwargs(__lowerCAmelCase )
else:
snake_case__ = _number_of_shards_in_gen_kwargs(__lowerCAmelCase )
assert out == expected
| 33 | 1 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : str = IFImgaImgSuperResolutionPipeline
__lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
__lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
__lowercase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:Optional[Any]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
snake_case__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 33 | 1 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase__ : Any = """\
"""
lowerCamelCase__ : List[str] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowerCamelCase__ : Any = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:List[Any] , _a:int = 16 , _a:bool = True , _a:Any=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
snake_case__ = '''cuda'''
else:
snake_case__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
snake_case__ = AutoModelForCausalLM.from_pretrained(_a )
snake_case__ = model.to(_a )
snake_case__ = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
snake_case__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
snake_case__ = model.config.max_length - 1
else:
snake_case__ = model.config.max_length
snake_case__ = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
snake_case__ = encodings['''input_ids''']
snake_case__ = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
snake_case__ = []
snake_case__ = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
snake_case__ = min(start_index + batch_size , len(_a ) )
snake_case__ = encoded_texts[start_index:end_index]
snake_case__ = attn_masks[start_index:end_index]
if add_start_token:
snake_case__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
snake_case__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
snake_case__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 )
snake_case__ = encoded_batch
with torch.no_grad():
snake_case__ = model(_a , attention_mask=_a ).logits
snake_case__ = out_logits[..., :-1, :].contiguous()
snake_case__ = labels[..., 1:].contiguous()
snake_case__ = attn_mask[..., 1:].contiguous()
snake_case__ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 33 |
import math
class __magic_name__ :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[list[float]] , _a:list[int] ):
snake_case__ = 0.0
snake_case__ = 0.0
for i in range(len(_a ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:list[list[int | float]] , _a:list[int] , _a:int , _a:float ):
for i in range(len(_a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def SCREAMING_SNAKE_CASE ( ) -> None:
# Training Examples ( m, n )
snake_case__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
snake_case__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
snake_case__ = SelfOrganizingMap()
snake_case__ = 3
snake_case__ = 0.5
for _ in range(__lowerCAmelCase ):
for j in range(len(__lowerCAmelCase ) ):
# training sample
snake_case__ = training_samples[j]
# Compute the winning vector
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# Update the winning vector
snake_case__ = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# classify test sample
snake_case__ = [0, 0, 0, 1]
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# results
print(F"""Clusters that the test sample belongs to : {winner}""" )
print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 33 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> bool:
snake_case__ = str(__lowerCAmelCase )
return n == n[::-1]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100_0000 ) -> Union[str, Any]:
snake_case__ = 0
for i in range(1 , __lowerCAmelCase ):
if is_palindrome(__lowerCAmelCase ) and is_palindrome(bin(__lowerCAmelCase ).split('''b''' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 33 |
from __future__ import annotations
from statistics import mean
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
snake_case__ = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i]
snake_case__ = []
snake_case__ = 0
snake_case__ = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
snake_case__ = []
snake_case__ = -1
for i in range(__lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
snake_case__ = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
snake_case__ = i
total_time += burst_time[target_process]
completed += 1
snake_case__ = 0
snake_case__ = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
lowerCamelCase__ : Tuple = 4
lowerCamelCase__ : Union[str, Any] = [2, 5, 3, 7]
lowerCamelCase__ : Optional[Any] = [0, 0, 0, 0]
lowerCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 33 | 1 |
from math import ceil
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 1001 ) -> int:
snake_case__ = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
snake_case__ = 2 * i + 1
snake_case__ = 2 * i
snake_case__ = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
lowerCamelCase__ : Dict = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 33 |
lowerCamelCase__ : List[str] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
snake_case__ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
snake_case__ = Stack()
snake_case__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__lowerCAmelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(__lowerCAmelCase )
elif i == ")":
# RULE 4
snake_case__ = operator_stack.peek()
operator_stack.pop()
snake_case__ = operand_stack.peek()
operand_stack.pop()
snake_case__ = operand_stack.peek()
operand_stack.pop()
snake_case__ = operators[opr](__lowerCAmelCase , __lowerCAmelCase )
operand_stack.push(__lowerCAmelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 33 | 1 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class __magic_name__ :
'''simple docstring'''
def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[int]=13 , _a:Any=64 , _a:Union[str, Any]=2 , _a:List[Any]=3 , _a:Optional[Any]="swish" , _a:Any=3 , _a:str=32 , _a:Optional[int]=0.1 , _a:Optional[int]=0.02 , _a:Optional[Any]=True , _a:Optional[Any]=True , _a:List[str]=10 , _a:List[str]=None , _a:str=0.25 , _a:Tuple=0.0 , _a:int=0.0 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = make_divisible(5_12 * width_multiplier , divisor=8 )
snake_case__ = hidden_act
snake_case__ = conv_kernel_size
snake_case__ = output_stride
snake_case__ = classifier_dropout_prob
snake_case__ = use_labels
snake_case__ = is_training
snake_case__ = num_labels
snake_case__ = initializer_range
snake_case__ = scope
snake_case__ = width_multiplier
snake_case__ = ffn_dropout
snake_case__ = attn_dropout
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:Dict , _a:List[Any] , _a:str , _a:Optional[int] ):
snake_case__ = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Tuple , _a:str , _a:List[str] , _a:Union[str, Any] ):
snake_case__ = self.num_labels
snake_case__ = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:List[str] , _a:List[str] , _a:str , _a:List[str] ):
snake_case__ = self.num_labels
snake_case__ = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
snake_case__ = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowercase : Optional[Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowercase : Optional[Any] = False
__lowercase : Union[str, Any] = False
__lowercase : Union[str, Any] = False
__lowercase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = MobileViTVaModelTester(self )
snake_case__ = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def SCREAMING_SNAKE_CASE__ ( self:int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE__ ( self:int ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE__ ( self:str ):
pass
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
def check_hidden_states_output(_a:Tuple , _a:str , _a:Any ):
snake_case__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
snake_case__ = model(**self._prepare_for_class(_a , _a ) )
snake_case__ = outputs.hidden_states
snake_case__ = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
snake_case__ = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Any ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( ) -> str:
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
# verify the logits
snake_case__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _a )
snake_case__ = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
snake_case__ = model.to(_a )
snake_case__ = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
snake_case__ = outputs.logits
# verify the logits
snake_case__ = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _a )
snake_case__ = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
snake_case__ = model.to(_a )
snake_case__ = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
snake_case__ = outputs.logits.detach().cpu()
snake_case__ = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] )
snake_case__ = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _a )
snake_case__ = image_processor.post_process_semantic_segmentation(outputs=_a )
snake_case__ = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _a )
| 33 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCamelCase__ : int = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:List[Any] , *_a:Dict , **_a:Tuple ):
warnings.warn(
'''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PerceiverImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 33 | 1 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Union[str, Any]:
return EnvironmentCommand()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[str]:
return EnvironmentCommand(args.accelerate_config_file )
class __magic_name__ (snake_case_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:ArgumentParser ):
snake_case__ = parser.add_parser('''env''' )
download_parser.set_defaults(func=_a )
download_parser.add_argument(
'''--accelerate-config_file''' , default=_a , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_a )
def __init__( self:List[str] , _a:Dict , *_a:Dict ):
snake_case__ = accelerate_config_file
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = '''not installed'''
if is_safetensors_available():
import safetensors
snake_case__ = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
snake_case__ = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
snake_case__ = '''not installed'''
snake_case__ = snake_case__ = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
snake_case__ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_a ):
snake_case__ = load_config_from_file(self._accelerate_config_file ).to_dict()
snake_case__ = (
'''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(_a , _a )
else F"""\t{accelerate_config}"""
)
snake_case__ = '''not installed'''
snake_case__ = '''NA'''
if is_torch_available():
import torch
snake_case__ = torch.__version__
snake_case__ = torch.cuda.is_available()
snake_case__ = '''not installed'''
snake_case__ = '''NA'''
if is_tf_available():
import tensorflow as tf
snake_case__ = tf.__version__
try:
# deprecated in v2.1
snake_case__ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
snake_case__ = bool(tf.config.list_physical_devices('''GPU''' ) )
snake_case__ = '''not installed'''
snake_case__ = '''not installed'''
snake_case__ = '''not installed'''
snake_case__ = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
snake_case__ = flax.__version__
snake_case__ = jax.__version__
snake_case__ = jaxlib.__version__
snake_case__ = jax.lib.xla_bridge.get_backend().platform
snake_case__ = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F"""{safetensors_version}""",
'''Accelerate version''': F"""{accelerate_version}""",
'''Accelerate config''': F"""{accelerate_config_str}""",
'''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
'''Tensorflow version (GPU?)''': F"""{tf_version} ({tf_cuda_available})""",
'''Flax version (CPU?/GPU?/TPU?)''': F"""{flax_version} ({jax_backend})""",
'''Jax version''': F"""{jax_version}""",
'''JaxLib version''': F"""{jaxlib_version}""",
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_a ) )
return info
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:Tuple ):
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 33 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Tuple = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowerCamelCase__ : Union[str, Any] = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : int = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowerCamelCase__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
snake_case__ = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Dict = StableDiffusionLatentUpscalePipeline
__lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
__lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
__lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowercase : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowercase : List[Any] = frozenset([] )
__lowercase : Any = True
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = 1
snake_case__ = 4
snake_case__ = (16, 16)
snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
torch.manual_seed(0 )
snake_case__ = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
snake_case__ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' )
snake_case__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
snake_case__ = CLIPTextModel(_a )
snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case__ = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:List[str]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''cpu'''
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
snake_case__ = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = 2
snake_case__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
snake_case__ = getattr(_a , scheduler_enum.name )
snake_case__ = scheduler_cls.from_config(pipe.scheduler.config )
snake_case__ = pipe(**_a )[0]
outputs.append(_a )
assert check_same_shape(_a )
@require_torch_gpu
@slow
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
snake_case__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 33 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
snake_case__ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
snake_case__ = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
snake_case__ = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
snake_case__ = model(_a , labels=_a ).loss
snake_case__ = -tf.math.reduce_mean(_a ).numpy()
snake_case__ = -21.228168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 33 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''ZinengTang/tvlt-base'''
snake_case__ = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:List[Any] ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , **_a:Tuple ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
snake_case__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _a )
self.assertIsInstance(processor.image_processor , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([1_20_00] )
snake_case__ = feature_extractor(_a , return_tensors='''np''' )
snake_case__ = processor(audio=_a , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = image_processor(_a , return_tensors='''np''' )
snake_case__ = processor(images=_a , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([1_20_00] )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = processor(audio=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 33 | 1 |
import qiskit
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 2 ) -> qiskit.result.counts.Counts:
snake_case__ = qubits
# Using Aer's simulator
snake_case__ = qiskit.Aer.get_backend('''aer_simulator''' )
# Creating a Quantum Circuit acting on the q register
snake_case__ = qiskit.QuantumCircuit(__lowerCAmelCase , __lowerCAmelCase )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , __lowerCAmelCase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , __lowerCAmelCase )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(__lowerCAmelCase ) ) , list(range(__lowerCAmelCase ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
snake_case__ = qiskit.execute(__lowerCAmelCase , __lowerCAmelCase , shots=1000 )
return job.result().get_counts(__lowerCAmelCase )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 33 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Optional[int] = 'data2vec-vision'
def __init__( self:int , _a:Tuple=7_68 , _a:int=12 , _a:Any=12 , _a:Optional[int]=30_72 , _a:Optional[int]="gelu" , _a:Any=0.0 , _a:Any=0.0 , _a:List[str]=0.02 , _a:Dict=1e-12 , _a:Tuple=2_24 , _a:Any=16 , _a:str=3 , _a:str=False , _a:Union[str, Any]=False , _a:Optional[int]=False , _a:Any=False , _a:Dict=0.1 , _a:Dict=0.1 , _a:str=True , _a:str=[3, 5, 7, 11] , _a:List[str]=[1, 2, 3, 6] , _a:List[str]=True , _a:Any=0.4 , _a:str=2_56 , _a:Union[str, Any]=1 , _a:int=False , _a:Optional[int]=2_55 , **_a:Dict , ):
super().__init__(**_a )
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = use_mask_token
snake_case__ = use_absolute_position_embeddings
snake_case__ = use_relative_position_bias
snake_case__ = use_shared_relative_position_bias
snake_case__ = layer_scale_init_value
snake_case__ = drop_path_rate
snake_case__ = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case__ = out_indices
snake_case__ = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case__ = use_auxiliary_head
snake_case__ = auxiliary_loss_weight
snake_case__ = auxiliary_channels
snake_case__ = auxiliary_num_convs
snake_case__ = auxiliary_concat_input
snake_case__ = semantic_loss_ignore_index
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Any = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return 1e-4
| 33 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __magic_name__ :
'''simple docstring'''
def __init__( self:List[str] , _a:Optional[Any] , _a:Any=13 , _a:Union[str, Any]=2 , _a:Dict=24 , _a:Optional[Any]=16 , _a:Dict=True , _a:str=True , _a:Tuple=32 , _a:str=5 , _a:Dict=4 , _a:List[str]=37 , _a:Any="gelu" , _a:Optional[int]=0.1 , _a:Any=0.1 , _a:Any=10 , _a:List[str]=0.02 , _a:Optional[Any]=None , _a:Union[str, Any]=2 , _a:int=2 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = patch_size
snake_case__ = max_length
snake_case__ = num_mel_bins
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = type_sequence_label_size
snake_case__ = initializer_range
snake_case__ = scope
snake_case__ = frequency_stride
snake_case__ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
snake_case__ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
snake_case__ = (self.max_length - self.patch_size) // self.time_stride + 1
snake_case__ = frequency_out_dimension * time_out_dimension
snake_case__ = num_patches + 2
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ = self.get_config()
return config, input_values, labels
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:List[Any] , _a:Dict , _a:Any ):
snake_case__ = ASTModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) = config_and_inputs
snake_case__ = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : str = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
__lowercase : Optional[Any] = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
__lowercase : Tuple = False
__lowercase : Optional[int] = False
__lowercase : List[Any] = False
__lowercase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Union[str, Any] , _a:Optional[int] , _a:List[str] , _a:Optional[Any] , _a:Tuple ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = ASTModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
pass
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['''input_values''']
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
@slow
def SCREAMING_SNAKE_CASE__ ( self:str ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = ASTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( ) -> Any:
snake_case__ = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
snake_case__ , snake_case__ = torchaudio.load(__lowerCAmelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = self.default_feature_extractor
snake_case__ = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(_a )
snake_case__ = self.default_feature_extractor
snake_case__ , snake_case__ = prepare_audio()
snake_case__ = audio.squeeze().numpy()
snake_case__ = feature_extractor(_a , sampling_rate=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
# verify the logits
snake_case__ = torch.Size((1, 5_27) )
self.assertEqual(outputs.logits.shape , _a )
snake_case__ = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 33 |
import os
import sys
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : Optional[int] = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
return AutoConfig.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
return AutoTokenizer.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Tuple:
return AutoModel.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
return AutoModelForCausalLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[Any]:
return AutoModelForMaskedLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
return AutoModelForSequenceClassification.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
return AutoModelForQuestionAnswering.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
| 33 | 1 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
def count_of_possible_combinations(__lowerCAmelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
def count_of_possible_combinations_with_dp_array(
__lowerCAmelCase , __lowerCAmelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
snake_case__ = sum(
count_of_possible_combinations_with_dp_array(target - item , __lowerCAmelCase )
for item in array )
snake_case__ = answer
return answer
snake_case__ = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
snake_case__ = [0] * (target + 1)
snake_case__ = 1
for i in range(1 , target + 1 ):
for j in range(__lowerCAmelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : Any = 3
lowerCamelCase__ : int = 5
lowerCamelCase__ : List[Any] = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 33 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : str = (CMStochasticIterativeScheduler,)
__lowercase : List[str] = 10
def SCREAMING_SNAKE_CASE__ ( self:int , **_a:Optional[int] ):
snake_case__ = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**_a )
return config
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = 10
snake_case__ = self.get_scheduler_config()
snake_case__ = self.scheduler_classes[0](**_a )
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps[0]
snake_case__ = scheduler.timesteps[1]
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_a )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = 1
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_a ):
# 1. scale model input
snake_case__ = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
snake_case__ = model(_a , _a )
# 3. predict previous sample x_t-1
snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
snake_case__ = pred_prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [1_06, 0]
scheduler.set_timesteps(timesteps=_a )
snake_case__ = scheduler.timesteps
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
snake_case__ = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
snake_case__ = model(_a , _a )
# 3. predict previous sample x_t-1
snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
snake_case__ = pred_prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [39, 30, 12, 15, 0]
with self.assertRaises(_a , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [39, 30, 12, 1, 0]
snake_case__ = len(_a )
with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=_a )
| 33 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> None:
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
snake_case__ , snake_case__ = array[indexa], array[indexa]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> None:
if length > 1:
snake_case__ = int(length / 2 )
for i in range(__lowerCAmelCase , low + middle ):
comp_and_swap(__lowerCAmelCase , __lowerCAmelCase , i + middle , __lowerCAmelCase )
bitonic_merge(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
bitonic_merge(__lowerCAmelCase , low + middle , __lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> None:
if length > 1:
snake_case__ = int(length / 2 )
bitonic_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 1 )
bitonic_sort(__lowerCAmelCase , low + middle , __lowerCAmelCase , 0 )
bitonic_merge(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : List[str] = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ : int = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
| 33 |
import numpy as np
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray:
return vector * sigmoid(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : List[Any] = {"""vocab_file""": """sentencepiece.model"""}
lowerCamelCase__ : Dict = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
}
lowerCamelCase__ : List[str] = {
"""google/rembert""": 2_5_6,
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[Any] = VOCAB_FILES_NAMES
__lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self:Union[str, Any] , _a:Union[str, Any] , _a:Tuple=False , _a:Dict=True , _a:int=True , _a:Optional[Any]="[CLS]" , _a:int="[SEP]" , _a:Dict="[UNK]" , _a:Optional[int]="[SEP]" , _a:Any="[PAD]" , _a:Union[str, Any]="[CLS]" , _a:Dict="[MASK]" , **_a:str , ):
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , )
snake_case__ = do_lower_case
snake_case__ = remove_space
snake_case__ = keep_accents
snake_case__ = vocab_file
snake_case__ = spm.SentencePieceProcessor()
self.sp_model.Load(_a )
@property
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return len(self.sp_model )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self:str ):
snake_case__ = self.__dict__.copy()
snake_case__ = None
return state
def __setstate__( self:List[Any] , _a:Optional[int] ):
snake_case__ = d
snake_case__ = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Tuple , _a:Any=False ):
snake_case__ = self.sp_model.EncodeAsPieces(_a )
return pieces
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:Optional[int] ):
return self.sp_model.PieceToId(_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:Optional[int] ):
return self.sp_model.IdToPiece(_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:Union[str, Any] ):
snake_case__ = self.sp_model.decode_pieces(_a )
return out_string
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:List[int] , _a:Optional[List[int]] = None ):
snake_case__ = [self.sep_token_id]
snake_case__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:List[int] , _a:Optional[List[int]] = None , _a:bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1]
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:List[int] , _a:Optional[List[int]] = None ):
snake_case__ = [self.sep_token_id]
snake_case__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:str , _a:Optional[str] = None ):
if not os.path.isdir(_a ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_a ) )
return
snake_case__ = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 33 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100 ) -> int:
snake_case__ = set()
snake_case__ = 0
snake_case__ = n + 1 # maximum limit
for a in range(2 , __lowerCAmelCase ):
for b in range(2 , __lowerCAmelCase ):
snake_case__ = a**b # calculates the current power
collect_powers.add(__lowerCAmelCase ) # adds the result to the set
return len(__lowerCAmelCase )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 33 | 1 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Union[str, Any] = CLIPTokenizer
__lowercase : str = CLIPTokenizerFast
__lowercase : Dict = True
__lowercase : Any = {}
__lowercase : Tuple = False
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().setUp()
# fmt: off
snake_case__ = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
snake_case__ = dict(zip(_a , range(len(_a ) ) ) )
snake_case__ = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
snake_case__ = {'''unk_token''': '''<unk>'''}
snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , **_a:Optional[int] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , **_a:Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:List[str] ):
snake_case__ = '''lower newer'''
snake_case__ = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ = '''lower newer'''
snake_case__ = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
snake_case__ = tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
snake_case__ = tokens + [tokenizer.unk_token]
snake_case__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
@require_ftfy
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case__ = self.tokenizer_class.from_pretrained(_a , **_a )
snake_case__ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
snake_case__ = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
snake_case__ = tokenizer_s.tokenize(_a )
snake_case__ = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
snake_case__ = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
snake_case__ = tokenizer_s.tokenize(_a )
snake_case__ = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of space type
snake_case__ = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
snake_case__ = tokenizer_s.tokenize(_a )
snake_case__ = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
# Test that the tokenization is identical on unicode of line break type
snake_case__ = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
snake_case__ = tokenizer_s.tokenize(_a )
snake_case__ = tokenizer_r.tokenize(_a )
self.assertListEqual(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case__ = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ = F"""{text_of_1_token} {text_of_1_token}"""
snake_case__ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
snake_case__ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
snake_case__ = F""" {text}"""
snake_case__ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , )
snake_case__ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
def SCREAMING_SNAKE_CASE__ ( self:str ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_a ) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.''' ) )
@require_ftfy
def SCREAMING_SNAKE_CASE__ ( self:int ):
super().test_tokenization_python_rust_equals()
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
# CLIP always lower cases letters
pass
| 33 |
from copy import deepcopy
class __magic_name__ :
'''simple docstring'''
def __init__( self:int , _a:list[int] | None = None , _a:int | None = None ):
if arr is None and size is not None:
snake_case__ = size
snake_case__ = [0] * size
elif arr is not None:
self.init(_a )
else:
raise ValueError('''Either arr or size must be specified''' )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:list[int] ):
snake_case__ = len(_a )
snake_case__ = deepcopy(_a )
for i in range(1 , self.size ):
snake_case__ = self.next_(_a )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case__ = self.next_(_a )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:int ):
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:int ):
return index - (index & (-index))
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:int ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case__ = self.next_(_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
self.add(_a , value - self.get(_a ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ):
if right == 0:
return 0
snake_case__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case__ = self.prev(_a )
return result
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
return self.prefix(_a ) - self.prefix(_a )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
return self.query(_a , index + 1 )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
value -= self.tree[0]
if value < 0:
return -1
snake_case__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case__ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : int = ['pixel_values']
def __init__( self:Union[str, Any] , _a:bool = True , _a:Dict[str, int] = None , _a:PILImageResampling = PILImageResampling.BICUBIC , _a:bool = True , _a:Dict[str, int] = None , _a:bool = True , _a:Union[int, float] = 1 / 2_55 , _a:bool = True , _a:Optional[Union[float, List[float]]] = None , _a:Optional[Union[float, List[float]]] = None , _a:bool = True , **_a:Tuple , ):
super().__init__(**_a )
snake_case__ = size if size is not None else {'''shortest_edge''': 2_24}
snake_case__ = get_size_dict(_a , default_to_square=_a )
snake_case__ = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
snake_case__ = get_size_dict(_a , default_to_square=_a , param_name='''crop_size''' )
snake_case__ = do_resize
snake_case__ = size
snake_case__ = resample
snake_case__ = do_center_crop
snake_case__ = crop_size
snake_case__ = do_rescale
snake_case__ = rescale_factor
snake_case__ = do_normalize
snake_case__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case__ = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case__ = do_convert_rgb
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:np.ndarray , _a:Dict[str, int] , _a:PILImageResampling = PILImageResampling.BICUBIC , _a:Optional[Union[str, ChannelDimension]] = None , **_a:Dict , ):
snake_case__ = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
snake_case__ = get_resize_output_image_size(_a , size=size['''shortest_edge'''] , default_to_square=_a )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:np.ndarray , _a:Dict[str, int] , _a:Optional[Union[str, ChannelDimension]] = None , **_a:Optional[int] , ):
snake_case__ = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_a , size=(size['''height'''], size['''width''']) , data_format=_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:np.ndarray , _a:Union[int, float] , _a:Optional[Union[str, ChannelDimension]] = None , **_a:str , ):
return rescale(_a , scale=_a , data_format=_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:np.ndarray , _a:Union[float, List[float]] , _a:Union[float, List[float]] , _a:Optional[Union[str, ChannelDimension]] = None , **_a:Optional[Any] , ):
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:ImageInput , _a:bool = None , _a:Dict[str, int] = None , _a:PILImageResampling = None , _a:bool = None , _a:int = None , _a:bool = None , _a:float = None , _a:bool = None , _a:Optional[Union[float, List[float]]] = None , _a:Optional[Union[float, List[float]]] = None , _a:bool = None , _a:Optional[Union[str, TensorType]] = None , _a:Optional[ChannelDimension] = ChannelDimension.FIRST , **_a:int , ):
snake_case__ = do_resize if do_resize is not None else self.do_resize
snake_case__ = size if size is not None else self.size
snake_case__ = get_size_dict(_a , param_name='''size''' , default_to_square=_a )
snake_case__ = resample if resample is not None else self.resample
snake_case__ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case__ = crop_size if crop_size is not None else self.crop_size
snake_case__ = get_size_dict(_a , param_name='''crop_size''' , default_to_square=_a )
snake_case__ = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ = image_mean if image_mean is not None else self.image_mean
snake_case__ = image_std if image_std is not None else self.image_std
snake_case__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case__ = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case__ = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
snake_case__ = [to_numpy_array(_a ) for image in images]
if do_resize:
snake_case__ = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_center_crop:
snake_case__ = [self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
snake_case__ = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
snake_case__ = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
snake_case__ = [to_channel_dimension_format(_a , _a ) for image in images]
snake_case__ = {'''pixel_values''': images}
return BatchFeature(data=_a , tensor_type=_a )
| 33 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __magic_name__ :
'''simple docstring'''
__lowercase : int = BlenderbotConfig
__lowercase : Any = {}
__lowercase : Optional[Any] = 'gelu'
def __init__( self:Tuple , _a:Optional[Any] , _a:Optional[Any]=13 , _a:Tuple=7 , _a:Union[str, Any]=True , _a:int=False , _a:int=99 , _a:Optional[int]=32 , _a:List[str]=2 , _a:List[str]=4 , _a:List[Any]=37 , _a:Any=0.1 , _a:int=0.1 , _a:List[Any]=20 , _a:List[str]=2 , _a:int=1 , _a:Dict=0 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = seq_length
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = eos_token_id
snake_case__ = pad_token_id
snake_case__ = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case__ = prepare_blenderbot_inputs_dict(_a , _a , _a )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[Any] , _a:int ):
snake_case__ = TFBlenderbotModel(config=_a ).get_decoder()
snake_case__ = inputs_dict['''input_ids''']
snake_case__ = input_ids[:1, :]
snake_case__ = inputs_dict['''attention_mask'''][:1, :]
snake_case__ = inputs_dict['''head_mask''']
snake_case__ = 1
# first forward pass
snake_case__ = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
snake_case__ , snake_case__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case__ = model(_a , attention_mask=_a )[0]
snake_case__ = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
snake_case__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Tuple:
if attention_mask is None:
snake_case__ = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__lowercase : Tuple = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowercase : Any = True
__lowercase : int = False
__lowercase : int = False
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFBlenderbotModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_tokenizers
@require_tf
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[int] = ['My friends are cool but they eat too many carbs.']
__lowercase : Optional[int] = 'facebook/blenderbot-400M-distill'
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.tokenizer(self.src_text , return_tensors='''tf''' )
snake_case__ = self.model.generate(
model_inputs.input_ids , )
snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_a )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 33 | 1 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> list[int]:
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
snake_case__ = [True] * (num + 1)
snake_case__ = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , __lowerCAmelCase ):
snake_case__ = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ : int = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
| 33 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = 0
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:str ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case__ = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
snake_case__ = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
snake_case__ = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
snake_case__ = AutoImageProcessor.from_pretrained('''clip-base''' )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
with self.assertRaisesRegex(
_a , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case__ = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ = Path(_a ) / '''preprocessor_config.json'''
snake_case__ = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
snake_case__ = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
snake_case__ = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
snake_case__ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case__ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 33 | 1 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
snake_case__ = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
snake_case__ = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
snake_case__ = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
snake_case__ = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
snake_case__ = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
snake_case__ = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
snake_case__ = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
snake_case__ = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
snake_case__ = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
snake_case__ = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
snake_case__ = key.replace('''image_encoder.module''' , '''flava.image_model''' )
snake_case__ = key.replace('''text_encoder.module''' , '''flava.text_model''' )
snake_case__ = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
snake_case__ = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
snake_case__ = key.replace('''text_projection''' , '''flava.text_projection''' )
snake_case__ = key.replace('''image_projection''' , '''flava.image_projection''' )
snake_case__ = value.float()
for key, value in codebook_state_dict.items():
snake_case__ = value
return upgrade
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> Any:
if config_path is not None:
snake_case__ = FlavaConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case__ = FlavaConfig()
snake_case__ = FlavaForPreTraining(__lowerCAmelCase ).eval()
snake_case__ = convert_dalle_checkpoint(__lowerCAmelCase , __lowerCAmelCase , save_checkpoint=__lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ):
snake_case__ = torch.load(__lowerCAmelCase , map_location='''cpu''' )
else:
snake_case__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location='''cpu''' )
snake_case__ = upgrade_state_dict(__lowerCAmelCase , __lowerCAmelCase )
hf_model.load_state_dict(__lowerCAmelCase )
snake_case__ = hf_model.state_dict()
snake_case__ = count_parameters(__lowerCAmelCase )
snake_case__ = count_parameters(__lowerCAmelCase ) + count_parameters(__lowerCAmelCase )
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 )
hf_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ : int = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 33 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> int:
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ = ''''''
else:
snake_case__ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[
: config.hidden_size, :
]
snake_case__ = in_proj_bias[: config.hidden_size]
snake_case__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
snake_case__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
snake_case__ = dct.pop(__lowerCAmelCase )
snake_case__ = val
def SCREAMING_SNAKE_CASE ( ) -> str:
snake_case__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = ViTConfig()
snake_case__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
snake_case__ = True
snake_case__ = int(vit_name[-12:-10] )
snake_case__ = int(vit_name[-9:-6] )
else:
snake_case__ = 1000
snake_case__ = '''huggingface/label-files'''
snake_case__ = '''imagenet-1k-id2label.json'''
snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = int(vit_name[-6:-4] )
snake_case__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
snake_case__ = 192
snake_case__ = 768
snake_case__ = 12
snake_case__ = 3
elif vit_name[9:].startswith('''small''' ):
snake_case__ = 384
snake_case__ = 1536
snake_case__ = 12
snake_case__ = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
snake_case__ = 768
snake_case__ = 2304
snake_case__ = 8
snake_case__ = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
snake_case__ = 1024
snake_case__ = 4096
snake_case__ = 24
snake_case__ = 16
elif vit_name[4:].startswith('''huge''' ):
snake_case__ = 1280
snake_case__ = 5120
snake_case__ = 32
snake_case__ = 16
# load original model from timm
snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ = timm_model.state_dict()
if base_model:
remove_classification_head_(__lowerCAmelCase )
snake_case__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ = ViTModel(__lowerCAmelCase ).eval()
else:
snake_case__ = ViTForImageClassification(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case__ = DeiTImageProcessor(size=config.image_size )
else:
snake_case__ = ViTImageProcessor(size=config.image_size )
snake_case__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
snake_case__ = encoding['''pixel_values''']
snake_case__ = model(__lowerCAmelCase )
if base_model:
snake_case__ = timm_model.forward_features(__lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
else:
snake_case__ = timm_model(__lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase__ : str = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 33 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = 't5'
__lowercase : Union[str, Any] = ['past_key_values']
__lowercase : Optional[Any] = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self:Tuple , _a:Any=3_21_28 , _a:Union[str, Any]=5_12 , _a:str=64 , _a:List[Any]=20_48 , _a:Dict=6 , _a:Tuple=None , _a:str=8 , _a:List[str]=32 , _a:Dict=1_28 , _a:Any=0.1 , _a:List[str]=1e-6 , _a:List[Any]=1.0 , _a:Tuple="relu" , _a:Union[str, Any]=True , _a:int=True , _a:Dict=0 , _a:Optional[Any]=1 , **_a:str , ):
snake_case__ = vocab_size
snake_case__ = d_model
snake_case__ = d_kv
snake_case__ = d_ff
snake_case__ = num_layers
snake_case__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
snake_case__ = num_heads
snake_case__ = relative_attention_num_buckets
snake_case__ = relative_attention_max_distance
snake_case__ = dropout_rate
snake_case__ = layer_norm_epsilon
snake_case__ = initializer_factor
snake_case__ = feed_forward_proj
snake_case__ = use_cache
snake_case__ = self.feed_forward_proj.split('''-''' )
snake_case__ = act_info[-1]
snake_case__ = act_info[0] == '''gated'''
if len(_a ) > 1 and act_info[0] != "gated" or len(_a ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
snake_case__ = '''gelu_new'''
super().__init__(
pad_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , **_a , )
class __magic_name__ (snake_case_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
snake_case__ = '''past_encoder_sequence + sequence'''
snake_case__ = {0: '''batch'''}
snake_case__ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
snake_case__ = {0: '''batch''', 1: '''decoder_sequence'''}
snake_case__ = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_a , direction='''inputs''' )
return common_inputs
@property
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return 13
| 33 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = ['image_processor', 'tokenizer']
__lowercase : str = 'AutoImageProcessor'
__lowercase : Dict = 'AutoTokenizer'
def __init__( self:int , _a:List[str]=None , _a:Optional[Any]=None , **_a:List[str] ):
snake_case__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
snake_case__ = kwargs.pop('''feature_extractor''' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
snake_case__ = self.image_processor
snake_case__ = False
def __call__( self:Optional[int] , *_a:str , **_a:int ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
snake_case__ = kwargs.pop('''images''' , _a )
snake_case__ = kwargs.pop('''text''' , _a )
if len(_a ) > 0:
snake_case__ = args[0]
snake_case__ = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
snake_case__ = self.image_processor(_a , *_a , **_a )
if text is not None:
snake_case__ = self.tokenizer(_a , **_a )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Union[str, Any] , **_a:Any ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , *_a:Union[str, Any] , **_a:Optional[int] ):
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
snake_case__ = True
snake_case__ = self.tokenizer
yield
snake_case__ = self.image_processor
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Dict , _a:Dict=False , _a:Optional[int]=None ):
if added_vocab is None:
snake_case__ = self.tokenizer.get_added_vocab()
snake_case__ = {}
while tokens:
snake_case__ = re.search(r'''<s_(.*?)>''' , _a , re.IGNORECASE )
if start_token is None:
break
snake_case__ = start_token.group(1 )
snake_case__ = re.search(rF"""</s_{key}>""" , _a , re.IGNORECASE )
snake_case__ = start_token.group()
if end_token is None:
snake_case__ = tokens.replace(_a , '''''' )
else:
snake_case__ = end_token.group()
snake_case__ = re.escape(_a )
snake_case__ = re.escape(_a )
snake_case__ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _a , re.IGNORECASE )
if content is not None:
snake_case__ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
snake_case__ = self.tokenajson(_a , is_inner_value=_a , added_vocab=_a )
if value:
if len(_a ) == 1:
snake_case__ = value[0]
snake_case__ = value
else: # leaf nodes
snake_case__ = []
for leaf in content.split(r'''<sep/>''' ):
snake_case__ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
snake_case__ = leaf[1:-2] # for categorical special tokens
output[key].append(_a )
if len(output[key] ) == 1:
snake_case__ = output[key][0]
snake_case__ = tokens[tokens.find(_a ) + len(_a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_a , added_vocab=_a )
if len(_a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 33 | 1 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> bool:
if number < 0:
raise ValueError('''number must not be negative''' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
'''simple docstring'''
def __init__( self:Optional[Any] , _a:int , _a:str=3 , _a:Optional[int]=32 , _a:Optional[Any]=3 , _a:Tuple=10 , _a:List[Any]=[8, 16, 32, 64] , _a:str=[1, 1, 2, 1] , _a:Any=True , _a:List[Any]=True , _a:List[str]="relu" , _a:int=3 , _a:Tuple=None , _a:Tuple=["stage2", "stage3", "stage4"] , _a:List[Any]=[2, 3, 4] , _a:Union[str, Any]=1 , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = image_size
snake_case__ = num_channels
snake_case__ = embeddings_size
snake_case__ = hidden_sizes
snake_case__ = depths
snake_case__ = is_training
snake_case__ = use_labels
snake_case__ = hidden_act
snake_case__ = num_labels
snake_case__ = scope
snake_case__ = len(_a )
snake_case__ = out_features
snake_case__ = out_indices
snake_case__ = num_groups
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ = None
if self.use_labels:
snake_case__ = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Optional[int] , _a:Tuple , _a:int ):
snake_case__ = BitModel(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Tuple , _a:Any , _a:Union[str, Any] ):
snake_case__ = self.num_labels
snake_case__ = BitForImageClassification(_a )
model.to(_a )
model.eval()
snake_case__ = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:str , _a:List[str] , _a:Any ):
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case__ = None
snake_case__ = BitBackbone(config=_a )
model.to(_a )
model.eval()
snake_case__ = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ = config_and_inputs
snake_case__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__lowercase : int = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
__lowercase : Tuple = False
__lowercase : Optional[Any] = False
__lowercase : str = False
__lowercase : Tuple = False
__lowercase : Tuple = False
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = BitModelTester(self )
snake_case__ = ConfigTester(self , config_class=_a , has_text_modality=_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
pass
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(_a )
snake_case__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
def check_hidden_states_output(_a:List[Any] , _a:int , _a:Union[str, Any] ):
snake_case__ = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
snake_case__ = model(**self._prepare_for_class(_a , _a ) )
snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case__ = layer_type
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
pass
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( ) -> Any:
snake_case__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
snake_case__ = self.default_image_processor
snake_case__ = prepare_img()
snake_case__ = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
snake_case__ = model(**_a )
# verify the logits
snake_case__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _a )
snake_case__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
__lowercase : int = BitConfig
__lowercase : Any = False
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = BitModelTester(self )
| 33 | 1 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
lowerCamelCase__ : Dict = logging.getLogger(__name__)
lowerCamelCase__ : Union[str, Any] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
lowerCamelCase__ : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __magic_name__ :
'''simple docstring'''
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} ,)
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(snake_case_ )} ,)
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} ,)
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
__lowercase : bool = field(
default=snake_case_ ,metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} ,)
__lowercase : str = field(
default='main' ,metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} ,)
__lowercase : bool = field(
default=snake_case_ ,metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} ,)
def SCREAMING_SNAKE_CASE__ ( self:str ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class __magic_name__ :
'''simple docstring'''
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(default=snake_case_ ,metadata={'help': 'The input training data file (a text file).'} )
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} ,)
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} ,)
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} ,)
__lowercase : bool = field(
default=snake_case_ ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowercase : Optional[int] = field(
default=5 ,metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} ,)
__lowercase : Optional[int] = field(
default=snake_case_ ,metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} ,)
__lowercase : Optional[int] = field(
default=snake_case_ ,metadata={'help': 'The number of processes to use for the preprocessing.'} ,)
__lowercase : float = field(
default=0.15 ,metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__lowercase : bool = field(
default=snake_case_ ,metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} ,)
def SCREAMING_SNAKE_CASE__ ( self:str ):
if self.train_file is not None:
snake_case__ = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
snake_case__ = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
with open(__lowerCAmelCase , '''r''' , encoding='''utf-8''' ) as f:
snake_case__ = [json.loads(__lowerCAmelCase ) for line in f.read().splitlines() if (len(__lowerCAmelCase ) > 0 and not line.isspace())]
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
snake_case__ = {c: dataset[c] for c in dataset.column_names}
snake_case__ = refs
return Dataset.from_dict(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case__ , snake_case__ , snake_case__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case__ , snake_case__ , snake_case__ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
snake_case__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
snake_case__ = {}
if data_args.train_file is not None:
snake_case__ = data_args.train_file
if data_args.validation_file is not None:
snake_case__ = data_args.validation_file
snake_case__ = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
snake_case__ = '''text'''
snake_case__ = load_dataset(__lowerCAmelCase , data_files=__lowerCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
snake_case__ = AutoConfig.from_pretrained(model_args.config_name , **__lowerCAmelCase )
elif model_args.model_name_or_path:
snake_case__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowerCAmelCase )
else:
snake_case__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
snake_case__ = {
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
snake_case__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowerCAmelCase )
elif model_args.model_name_or_path:
snake_case__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowerCAmelCase )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
snake_case__ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
snake_case__ = AutoModelForMaskedLM.from_config(__lowerCAmelCase )
model.resize_token_embeddings(len(__lowerCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
snake_case__ = datasets['''train'''].column_names
else:
snake_case__ = datasets['''validation'''].column_names
snake_case__ = '''text''' if '''text''' in column_names else column_names[0]
snake_case__ = '''max_length''' if data_args.pad_to_max_length else False
def tokenize_function(__lowerCAmelCase ):
# Remove empty lines
snake_case__ = [line for line in examples['''text'''] if len(__lowerCAmelCase ) > 0 and not line.isspace()]
return tokenizer(examples['''text'''] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=data_args.max_seq_length )
snake_case__ = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
snake_case__ = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
snake_case__ = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
snake_case__ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
snake_case__ = False
# Data collator
# This one will take care of randomly masking the tokens.
snake_case__ = DataCollatorForWholeWordMask(tokenizer=__lowerCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
snake_case__ = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
snake_case__ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
snake_case__ = model_args.model_name_or_path
else:
snake_case__ = None
snake_case__ = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
snake_case__ = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCAmelCase , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
snake_case__ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case__ = trainer.evaluate()
snake_case__ = math.exp(eval_output['''eval_loss'''] )
snake_case__ = perplexity
snake_case__ = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[str]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 33 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase__ : Any = """\
"""
lowerCamelCase__ : List[str] = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowerCamelCase__ : Any = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __magic_name__ (datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:List[Any] , _a:int = 16 , _a:bool = True , _a:Any=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
snake_case__ = '''cuda'''
else:
snake_case__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
snake_case__ = AutoModelForCausalLM.from_pretrained(_a )
snake_case__ = model.to(_a )
snake_case__ = AutoTokenizer.from_pretrained(_a )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
snake_case__ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_a ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
snake_case__ = model.config.max_length - 1
else:
snake_case__ = model.config.max_length
snake_case__ = tokenizer(
_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors='''pt''' , return_attention_mask=_a , ).to(_a )
snake_case__ = encodings['''input_ids''']
snake_case__ = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
snake_case__ = []
snake_case__ = CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ):
snake_case__ = min(start_index + batch_size , len(_a ) )
snake_case__ = encoded_texts[start_index:end_index]
snake_case__ = attn_masks[start_index:end_index]
if add_start_token:
snake_case__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a )
snake_case__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
snake_case__ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 )
snake_case__ = encoded_batch
with torch.no_grad():
snake_case__ = model(_a , attention_mask=_a ).logits
snake_case__ = out_logits[..., :-1, :].contiguous()
snake_case__ = labels[..., 1:].contiguous()
snake_case__ = attn_mask[..., 1:].contiguous()
snake_case__ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
| 33 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Union[str, Any] = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 33 |
import os
from datetime import datetime as dt
from github import Github
lowerCamelCase__ : int = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
snake_case__ = Github(os.environ['''GITHUB_TOKEN'''] )
snake_case__ = g.get_repo('''huggingface/diffusers''' )
snake_case__ = repo.get_issues(state='''open''' )
for issue in open_issues:
snake_case__ = sorted(issue.get_comments() , key=lambda __lowerCAmelCase : i.created_at , reverse=__lowerCAmelCase )
snake_case__ = comments[0] if len(__lowerCAmelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 33 | 1 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : str = IFImgaImgSuperResolutionPipeline
__lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
__lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
__lowercase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:Optional[Any]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
snake_case__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 33 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(__lowerCAmelCase , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
snake_case__ = _distribute_shards(**__lowerCAmelCase )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = _split_gen_kwargs(__lowerCAmelCase , __lowerCAmelCase )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
if expected is RuntimeError:
with pytest.raises(__lowerCAmelCase ):
_number_of_shards_in_gen_kwargs(__lowerCAmelCase )
else:
snake_case__ = _number_of_shards_in_gen_kwargs(__lowerCAmelCase )
assert out == expected
| 33 | 1 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:Any , *_a:List[Any] , **_a:Any ):
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use VideoMAEImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 33 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ (snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : str = IFImgaImgSuperResolutionPipeline
__lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
__lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
__lowercase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
return self._get_superresolution_dummy_components()
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:Optional[Any]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
snake_case__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_a ) ).to(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
self._test_save_load_local()
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 33 | 1 |
from PIL import Image
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Image:
def brightness(__lowerCAmelCase ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(__lowerCAmelCase )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
lowerCamelCase__ : List[str] = change_brightness(img, 1_0_0)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 33 |
import math
class __magic_name__ :
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:list[list[float]] , _a:list[int] ):
snake_case__ = 0.0
snake_case__ = 0.0
for i in range(len(_a ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:list[list[int | float]] , _a:list[int] , _a:int , _a:float ):
for i in range(len(_a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def SCREAMING_SNAKE_CASE ( ) -> None:
# Training Examples ( m, n )
snake_case__ = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
snake_case__ = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
snake_case__ = SelfOrganizingMap()
snake_case__ = 3
snake_case__ = 0.5
for _ in range(__lowerCAmelCase ):
for j in range(len(__lowerCAmelCase ) ):
# training sample
snake_case__ = training_samples[j]
# Compute the winning vector
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# Update the winning vector
snake_case__ = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# classify test sample
snake_case__ = [0, 0, 0, 1]
snake_case__ = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# results
print(F"""Clusters that the test sample belongs to : {winner}""" )
print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
| 33 | 1 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : List[str] = ['image_processor', 'tokenizer']
__lowercase : str = 'AutoImageProcessor'
__lowercase : Dict = 'AutoTokenizer'
def __init__( self:int , _a:List[str]=None , _a:Optional[Any]=None , **_a:List[str] ):
snake_case__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
snake_case__ = kwargs.pop('''feature_extractor''' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
snake_case__ = self.image_processor
snake_case__ = False
def __call__( self:Optional[int] , *_a:str , **_a:int ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_a , **_a )
snake_case__ = kwargs.pop('''images''' , _a )
snake_case__ = kwargs.pop('''text''' , _a )
if len(_a ) > 0:
snake_case__ = args[0]
snake_case__ = args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
snake_case__ = self.image_processor(_a , *_a , **_a )
if text is not None:
snake_case__ = self.tokenizer(_a , **_a )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ = encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , *_a:Union[str, Any] , **_a:Any ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , *_a:Union[str, Any] , **_a:Optional[int] ):
return self.tokenizer.decode(*_a , **_a )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
snake_case__ = True
snake_case__ = self.tokenizer
yield
snake_case__ = self.image_processor
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:Dict , _a:Dict=False , _a:Optional[int]=None ):
if added_vocab is None:
snake_case__ = self.tokenizer.get_added_vocab()
snake_case__ = {}
while tokens:
snake_case__ = re.search(r'''<s_(.*?)>''' , _a , re.IGNORECASE )
if start_token is None:
break
snake_case__ = start_token.group(1 )
snake_case__ = re.search(rF"""</s_{key}>""" , _a , re.IGNORECASE )
snake_case__ = start_token.group()
if end_token is None:
snake_case__ = tokens.replace(_a , '''''' )
else:
snake_case__ = end_token.group()
snake_case__ = re.escape(_a )
snake_case__ = re.escape(_a )
snake_case__ = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""" , _a , re.IGNORECASE )
if content is not None:
snake_case__ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
snake_case__ = self.tokenajson(_a , is_inner_value=_a , added_vocab=_a )
if value:
if len(_a ) == 1:
snake_case__ = value[0]
snake_case__ = value
else: # leaf nodes
snake_case__ = []
for leaf in content.split(r'''<sep/>''' ):
snake_case__ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
snake_case__ = leaf[1:-2] # for categorical special tokens
output[key].append(_a )
if len(output[key] ) == 1:
snake_case__ = output[key][0]
snake_case__ = tokens[tokens.find(_a ) + len(_a ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_a , added_vocab=_a )
if len(_a ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 33 |
from __future__ import annotations
from statistics import mean
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
snake_case__ = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i]
snake_case__ = []
snake_case__ = 0
snake_case__ = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
snake_case__ = []
snake_case__ = -1
for i in range(__lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
snake_case__ = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
snake_case__ = i
total_time += burst_time[target_process]
completed += 1
snake_case__ = 0
snake_case__ = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
lowerCamelCase__ : Tuple = 4
lowerCamelCase__ : Union[str, Any] = [2, 5, 3, 7]
lowerCamelCase__ : Optional[Any] = [0, 0, 0, 0]
lowerCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 33 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[int]:
snake_case__ , snake_case__ = image.size
snake_case__ , snake_case__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
snake_case__ = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
snake_case__ = np.array(__lowerCAmelCase ).astype(np.floataa ) / 255.0
snake_case__ = image[None].transpose(0 , 3 , 1 , 2 )
snake_case__ = torch.from_numpy(__lowerCAmelCase )
return 2.0 * image - 1.0
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:List[str] , _a:VQModel , _a:UNetaDModel , _a:Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self:int , _a:Union[torch.Tensor, PIL.Image.Image] = None , _a:Optional[int] = 1 , _a:Optional[int] = 1_00 , _a:Optional[float] = 0.0 , _a:Optional[Union[torch.Generator, List[torch.Generator]]] = None , _a:Optional[str] = "pil" , _a:bool = True , ):
if isinstance(_a , PIL.Image.Image ):
snake_case__ = 1
elif isinstance(_a , torch.Tensor ):
snake_case__ = image.shape[0]
else:
raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}""" )
if isinstance(_a , PIL.Image.Image ):
snake_case__ = preprocess(_a )
snake_case__ , snake_case__ = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
snake_case__ = (batch_size, self.unet.config.in_channels // 2, height, width)
snake_case__ = next(self.unet.parameters() ).dtype
snake_case__ = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
snake_case__ = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
snake_case__ = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
snake_case__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case__ = {}
if accepts_eta:
snake_case__ = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
snake_case__ = torch.cat([latents, image] , dim=1 )
snake_case__ = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
snake_case__ = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
snake_case__ = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
snake_case__ = self.vqvae.decode(_a ).sample
snake_case__ = torch.clamp(_a , -1.0 , 1.0 )
snake_case__ = image / 2 + 0.5
snake_case__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case__ = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 33 |
lowerCamelCase__ : List[str] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> int:
snake_case__ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
snake_case__ = Stack()
snake_case__ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__lowerCAmelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(__lowerCAmelCase )
elif i == ")":
# RULE 4
snake_case__ = operator_stack.peek()
operator_stack.pop()
snake_case__ = operand_stack.peek()
operand_stack.pop()
snake_case__ = operand_stack.peek()
operand_stack.pop()
snake_case__ = operators[opr](__lowerCAmelCase , __lowerCAmelCase )
operand_stack.push(__lowerCAmelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 33 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 33 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCamelCase__ : int = logging.get_logger(__name__)
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:List[Any] , *_a:Dict , **_a:Tuple ):
warnings.warn(
'''The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use PerceiverImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 33 | 1 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> list:
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('''The given input must be positive''' )
# get the generated string sequence
snake_case__ = gray_code_sequence_string(__lowerCAmelCase )
#
# convert them to integers
for i in range(len(__lowerCAmelCase ) ):
snake_case__ = int(sequence[i] , 2 )
return sequence
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
snake_case__ = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
snake_case__ = gray_code_sequence_string(bit_count - 1 )
snake_case__ = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
snake_case__ = '''0''' + smaller_sequence[i]
sequence.append(__lowerCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
snake_case__ = '''1''' + smaller_sequence[i]
sequence.append(__lowerCAmelCase )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Tuple = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Tuple = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[int] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : str = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:str , _a:Dict ):
snake_case__ = 3
snake_case__ = 2_50
snake_case__ = ids_tensor((batch_size, length) , _a )
snake_case__ = torch.ones((batch_size, length) , device=_a , dtype=torch.float ) / length
return input_ids, scores
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ , snake_case__ = self._get_tensors(5 )
snake_case__ = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(_a , _a ) )
snake_case__ , snake_case__ = self._get_tensors(9 )
self.assertFalse(criteria(_a , _a ) )
snake_case__ , snake_case__ = self._get_tensors(10 )
self.assertTrue(criteria(_a , _a ) )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = MaxLengthCriteria(max_length=10 )
snake_case__ , snake_case__ = self._get_tensors(5 )
self.assertFalse(criteria(_a , _a ) )
snake_case__ , snake_case__ = self._get_tensors(9 )
self.assertFalse(criteria(_a , _a ) )
snake_case__ , snake_case__ = self._get_tensors(10 )
self.assertTrue(criteria(_a , _a ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
snake_case__ , snake_case__ = self._get_tensors(5 )
self.assertFalse(criteria(_a , _a ) )
snake_case__ , snake_case__ = self._get_tensors(9 )
self.assertFalse(criteria(_a , _a ) )
snake_case__ , snake_case__ = self._get_tensors(10 )
self.assertTrue(criteria(_a , _a ) )
snake_case__ = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ , snake_case__ = self._get_tensors(5 )
snake_case__ = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(_a , _a ) )
snake_case__ = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(_a , _a ) )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(_a ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
snake_case__ = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(_a ) , 1 )
| 33 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
snake_case__ = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Dict = StableDiffusionLatentUpscalePipeline
__lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
__lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
__lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowercase : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowercase : List[Any] = frozenset([] )
__lowercase : Any = True
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = 1
snake_case__ = 4
snake_case__ = (16, 16)
snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
torch.manual_seed(0 )
snake_case__ = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
snake_case__ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' )
snake_case__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
snake_case__ = CLIPTextModel(_a )
snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case__ = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:List[str]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''cpu'''
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
snake_case__ = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = 2
snake_case__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
snake_case__ = getattr(_a , scheduler_enum.name )
snake_case__ = scheduler_cls.from_config(pipe.scheduler.config )
snake_case__ = pipe(**_a )[0]
outputs.append(_a )
assert check_same_shape(_a )
@require_torch_gpu
@slow
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
snake_case__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 33 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=False ) -> int:
snake_case__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Dict:
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ = ''''''
else:
snake_case__ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
snake_case__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[
: config.hidden_size, :
]
snake_case__ = in_proj_bias[: config.hidden_size]
snake_case__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
snake_case__ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
snake_case__ = dct.pop(__lowerCAmelCase )
snake_case__ = val
def SCREAMING_SNAKE_CASE ( ) -> str:
snake_case__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
snake_case__ = ViTConfig()
snake_case__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
snake_case__ = True
snake_case__ = int(vit_name[-12:-10] )
snake_case__ = int(vit_name[-9:-6] )
else:
snake_case__ = 1000
snake_case__ = '''huggingface/label-files'''
snake_case__ = '''imagenet-1k-id2label.json'''
snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
snake_case__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = int(vit_name[-6:-4] )
snake_case__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
snake_case__ = 192
snake_case__ = 768
snake_case__ = 12
snake_case__ = 3
elif vit_name[9:].startswith('''small''' ):
snake_case__ = 384
snake_case__ = 1536
snake_case__ = 12
snake_case__ = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
snake_case__ = 768
snake_case__ = 2304
snake_case__ = 8
snake_case__ = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
snake_case__ = 1024
snake_case__ = 4096
snake_case__ = 24
snake_case__ = 16
elif vit_name[4:].startswith('''huge''' ):
snake_case__ = 1280
snake_case__ = 5120
snake_case__ = 32
snake_case__ = 16
# load original model from timm
snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ = timm_model.state_dict()
if base_model:
remove_classification_head_(__lowerCAmelCase )
snake_case__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ = ViTModel(__lowerCAmelCase ).eval()
else:
snake_case__ = ViTForImageClassification(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
snake_case__ = DeiTImageProcessor(size=config.image_size )
else:
snake_case__ = ViTImageProcessor(size=config.image_size )
snake_case__ = image_processor(images=prepare_img() , return_tensors='''pt''' )
snake_case__ = encoding['''pixel_values''']
snake_case__ = model(__lowerCAmelCase )
if base_model:
snake_case__ = timm_model.forward_features(__lowerCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__lowerCAmelCase , outputs.pooler_output , atol=1e-3 )
else:
snake_case__ = timm_model(__lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase__ : str = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 33 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''ZinengTang/tvlt-base'''
snake_case__ = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE__ ( self:Dict , **_a:List[Any] ):
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , **_a:Tuple ):
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
snake_case__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _a )
self.assertIsInstance(processor.image_processor , _a )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([1_20_00] )
snake_case__ = feature_extractor(_a , return_tensors='''np''' )
snake_case__ = processor(audio=_a , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = image_processor(_a , return_tensors='''np''' )
snake_case__ = processor(images=_a , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
snake_case__ = np.ones([1_20_00] )
snake_case__ = np.ones([3, 2_24, 2_24] )
snake_case__ = processor(audio=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.get_image_processor()
snake_case__ = self.get_feature_extractor()
snake_case__ = TvltProcessor(image_processor=_a , feature_extractor=_a )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 33 | 1 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Dict:
if not is_accelerate_available():
return method
snake_case__ = version.parse(accelerate.__version__ ).base_version
if version.parse(__lowerCAmelCase ) < version.parse('''0.17.0''' ):
return method
def wrapper(self , *__lowerCAmelCase , **__lowerCAmelCase ):
if hasattr(self , '''_hf_hook''' ) and hasattr(self._hf_hook , '''pre_forward''' ):
self._hf_hook.pre_forward(self )
return method(self , *__lowerCAmelCase , **__lowerCAmelCase )
return wrapper
| 33 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Optional[int] = 'data2vec-vision'
def __init__( self:int , _a:Tuple=7_68 , _a:int=12 , _a:Any=12 , _a:Optional[int]=30_72 , _a:Optional[int]="gelu" , _a:Any=0.0 , _a:Any=0.0 , _a:List[str]=0.02 , _a:Dict=1e-12 , _a:Tuple=2_24 , _a:Any=16 , _a:str=3 , _a:str=False , _a:Union[str, Any]=False , _a:Optional[int]=False , _a:Any=False , _a:Dict=0.1 , _a:Dict=0.1 , _a:str=True , _a:str=[3, 5, 7, 11] , _a:List[str]=[1, 2, 3, 6] , _a:List[str]=True , _a:Any=0.4 , _a:str=2_56 , _a:Union[str, Any]=1 , _a:int=False , _a:Optional[int]=2_55 , **_a:Dict , ):
super().__init__(**_a )
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = use_mask_token
snake_case__ = use_absolute_position_embeddings
snake_case__ = use_relative_position_bias
snake_case__ = use_shared_relative_position_bias
snake_case__ = layer_scale_init_value
snake_case__ = drop_path_rate
snake_case__ = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case__ = out_indices
snake_case__ = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case__ = use_auxiliary_head
snake_case__ = auxiliary_loss_weight
snake_case__ = auxiliary_channels
snake_case__ = auxiliary_num_convs
snake_case__ = auxiliary_concat_input
snake_case__ = semantic_loss_ignore_index
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Any = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return 1e-4
| 33 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ : Any = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Union[str, Any] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCamelCase__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 33 |
import os
import sys
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCamelCase__ : Optional[int] = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
return AutoConfig.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
return AutoTokenizer.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Tuple:
return AutoModel.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
return AutoModelForCausalLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[Any]:
return AutoModelForMaskedLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]:
return AutoModelForSequenceClassification.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
return AutoModelForQuestionAnswering.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
| 33 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[str] = BertJapaneseTokenizer
__lowercase : Optional[Any] = False
__lowercase : List[Any] = True
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
super().setUp()
snake_case__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Dict ):
snake_case__ = '''こんにちは、世界。 \nこんばんは、世界。'''
snake_case__ = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:Optional[int] ):
snake_case__ , snake_case__ = self.get_input_output_texts(_a )
snake_case__ = tokenizer.encode(_a , add_special_tokens=_a )
snake_case__ = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
return text, ids
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self:int ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.tokenizer_class(self.vocab_file )
snake_case__ = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(_a , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(_a )
snake_case__ = '''こんにちは、世界。\nこんばんは、世界。'''
snake_case__ = tokenizer.tokenize(_a )
self.assertListEqual(_a , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
snake_case__ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_a , '''wb''' ) as handle:
pickle.dump(_a , _a )
with open(_a , '''rb''' ) as handle:
snake_case__ = pickle.load(_a )
snake_case__ = tokenizer_new.tokenize(_a )
self.assertListEqual(_a , _a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
try:
snake_case__ = MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
try:
snake_case__ = MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = MecabTokenizer(do_lower_case=_a , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
try:
snake_case__ = MecabTokenizer(
do_lower_case=_a , normalize_text=_a , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = MecabTokenizer(normalize_text=_a , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(_a )
snake_case__ = '''こんにちは、世界。\nこんばんは、世界。'''
snake_case__ = tokenizer.tokenize(_a )
self.assertListEqual(_a , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
snake_case__ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_a , '''wb''' ) as handle:
pickle.dump(_a , _a )
with open(_a , '''rb''' ) as handle:
snake_case__ = pickle.load(_a )
snake_case__ = tokenizer_new.tokenize(_a )
self.assertListEqual(_a , _a )
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = SudachiTokenizer(do_lower_case=_a , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = SudachiTokenizer(normalize_text=_a , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = SudachiTokenizer(trim_whitespace=_a , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(_a )
snake_case__ = '''こんにちは、世界。\nこんばんは、世界。'''
snake_case__ = tokenizer.tokenize(_a )
self.assertListEqual(_a , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
snake_case__ = os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(_a , '''wb''' ) as handle:
pickle.dump(_a , _a )
with open(_a , '''rb''' ) as handle:
snake_case__ = pickle.load(_a )
snake_case__ = tokenizer_new.tokenize(_a )
self.assertListEqual(_a , _a )
@require_jumanpp
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = JumanppTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = JumanppTokenizer(normalize_text=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = JumanppTokenizer(trim_whitespace=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
snake_case__ = {}
for i, token in enumerate(_a ):
snake_case__ = i
snake_case__ = WordpieceTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
snake_case__ = tokenizer.subword_tokenizer
snake_case__ = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(_a , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
snake_case__ = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(_a , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
snake_case__ = tokenizer.encode('''ありがとう。''' , add_special_tokens=_a )
snake_case__ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_a )
snake_case__ = tokenizer.build_inputs_with_special_tokens(_a )
snake_case__ = tokenizer.build_inputs_with_special_tokens(_a , _a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Dict = BertJapaneseTokenizer
__lowercase : List[str] = False
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
super().setUp()
snake_case__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE__ ( self:List[str] , **_a:Tuple ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Optional[Any] ):
snake_case__ = '''こんにちは、世界。 \nこんばんは、世界。'''
snake_case__ = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self:Any ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self:str ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
snake_case__ = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
_a , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
snake_case__ = {}
for i, token in enumerate(_a ):
snake_case__ = i
snake_case__ = CharacterTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
snake_case__ = tokenizer.encode('''ありがとう。''' , add_special_tokens=_a )
snake_case__ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_a )
snake_case__ = tokenizer.build_inputs_with_special_tokens(_a )
snake_case__ = tokenizer.build_inputs_with_special_tokens(_a , _a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = '''cl-tohoku/bert-base-japanese'''
snake_case__ = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , _a )
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(_a )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
snake_case__ = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(_a )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
| 33 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : str = (CMStochasticIterativeScheduler,)
__lowercase : List[str] = 10
def SCREAMING_SNAKE_CASE__ ( self:int , **_a:Optional[int] ):
snake_case__ = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**_a )
return config
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = 10
snake_case__ = self.get_scheduler_config()
snake_case__ = self.scheduler_classes[0](**_a )
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps[0]
snake_case__ = scheduler.timesteps[1]
snake_case__ = self.dummy_sample
snake_case__ = 0.1 * sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
snake_case__ = scheduler.step(_a , _a , _a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_a )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = 1
scheduler.set_timesteps(_a )
snake_case__ = scheduler.timesteps
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_a ):
# 1. scale model input
snake_case__ = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
snake_case__ = model(_a , _a )
# 3. predict previous sample x_t-1
snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
snake_case__ = pred_prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [1_06, 0]
scheduler.set_timesteps(timesteps=_a )
snake_case__ = scheduler.timesteps
snake_case__ = torch.manual_seed(0 )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
snake_case__ = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
snake_case__ = model(_a , _a )
# 3. predict previous sample x_t-1
snake_case__ = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
snake_case__ = pred_prev_sample
snake_case__ = torch.sum(torch.abs(_a ) )
snake_case__ = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [39, 30, 12, 15, 0]
with self.assertRaises(_a , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [39, 30, 12, 1, 0]
snake_case__ = len(_a )
with self.assertRaises(_a , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**_a )
snake_case__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=_a )
| 33 | 1 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Tuple = ['vqvae']
def __init__( self:Any , _a:AutoencoderKL , _a:UNetaDConditionModel , _a:Mel , _a:Union[DDIMScheduler, DDPMScheduler] , ):
super().__init__()
self.register_modules(unet=_a , scheduler=_a , mel=_a , vqvae=_a )
def SCREAMING_SNAKE_CASE__ ( self:int ):
return 50 if isinstance(self.scheduler , _a ) else 10_00
@torch.no_grad()
def __call__( self:Tuple , _a:int = 1 , _a:str = None , _a:np.ndarray = None , _a:int = 0 , _a:int = 0 , _a:int = None , _a:torch.Generator = None , _a:float = 0 , _a:float = 0 , _a:torch.Generator = None , _a:float = 0 , _a:torch.Tensor = None , _a:torch.Tensor = None , _a:Union[str, Any]=True , ):
snake_case__ = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
snake_case__ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
snake_case__ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
snake_case__ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_a , device=self.device , )
snake_case__ = noise
snake_case__ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a , _a )
snake_case__ = self.mel.audio_slice_to_image(_a )
snake_case__ = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
snake_case__ = (input_image / 2_55) * 2 - 1
snake_case__ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
snake_case__ = self.vqvae.encode(torch.unsqueeze(_a , 0 ) ).latent_dist.sample(
generator=_a )[0]
snake_case__ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
snake_case__ = self.scheduler.add_noise(_a , _a , self.scheduler.timesteps[start_step - 1] )
snake_case__ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
snake_case__ = int(mask_start_secs * pixels_per_second )
snake_case__ = int(mask_end_secs * pixels_per_second )
snake_case__ = self.scheduler.add_noise(_a , _a , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _a ):
snake_case__ = self.unet(_a , _a , _a )['''sample''']
else:
snake_case__ = self.unet(_a , _a )['''sample''']
if isinstance(self.scheduler , _a ):
snake_case__ = self.scheduler.step(
model_output=_a , timestep=_a , sample=_a , eta=_a , generator=_a , )['''prev_sample''']
else:
snake_case__ = self.scheduler.step(
model_output=_a , timestep=_a , sample=_a , generator=_a , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
snake_case__ = mask[:, step, :, :mask_start]
if mask_end > 0:
snake_case__ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
snake_case__ = 1 / self.vqvae.config.scaling_factor * images
snake_case__ = self.vqvae.decode(_a )['''sample''']
snake_case__ = (images / 2 + 0.5).clamp(0 , 1 )
snake_case__ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
snake_case__ = (images * 2_55).round().astype('''uint8''' )
snake_case__ = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_a , mode='''RGB''' ).convert('''L''' ) for _ in images) )
snake_case__ = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) , **ImagePipelineOutput(_a ) )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:List[Image.Image] , _a:int = 50 ):
assert isinstance(self.scheduler , _a )
self.scheduler.set_timesteps(_a )
snake_case__ = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
snake_case__ = (sample / 2_55) * 2 - 1
snake_case__ = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
snake_case__ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
snake_case__ = self.scheduler.alphas_cumprod[t]
snake_case__ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
snake_case__ = 1 - alpha_prod_t
snake_case__ = self.unet(_a , _a )['''sample''']
snake_case__ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
snake_case__ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
snake_case__ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:torch.Tensor , _a:torch.Tensor , _a:float ):
snake_case__ = acos(torch.dot(torch.flatten(_a ) , torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) )
return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
| 33 |
import numpy as np
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> np.ndarray:
return vector * sigmoid(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Optional[int] = ['image_processor', 'tokenizer']
__lowercase : str = 'LayoutLMv3ImageProcessor'
__lowercase : Tuple = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self:Union[str, Any] , _a:Dict=None , _a:Optional[Any]=None , **_a:Tuple ):
snake_case__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
snake_case__ = kwargs.pop('''feature_extractor''' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self:List[Any] , _a:int , _a:Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _a:Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , _a:Union[List[List[int]], List[List[List[int]]]] = None , _a:Optional[Union[List[int], List[List[int]]]] = None , _a:bool = True , _a:Union[bool, str, PaddingStrategy] = False , _a:Union[bool, str, TruncationStrategy] = None , _a:Optional[int] = None , _a:int = 0 , _a:Optional[int] = None , _a:Optional[bool] = None , _a:Optional[bool] = None , _a:bool = False , _a:bool = False , _a:bool = False , _a:bool = False , _a:bool = True , _a:Optional[Union[str, TensorType]] = None , **_a:List[str] , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
snake_case__ = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
snake_case__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
snake_case__ = features['''words''']
snake_case__ = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
snake_case__ = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
snake_case__ = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
snake_case__ = images
return encoded_inputs
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int , _a:int ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
snake_case__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F""" {len(_a )} and {len(_a )}""" )
return images_with_overflow
def SCREAMING_SNAKE_CASE__ ( self:Any , *_a:Optional[int] , **_a:Tuple ):
return self.tokenizer.batch_decode(*_a , **_a )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , *_a:str , **_a:Dict ):
return self.tokenizer.decode(*_a , **_a )
@property
def SCREAMING_SNAKE_CASE__ ( self:int ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def SCREAMING_SNAKE_CASE__ ( self:str ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 33 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 100 ) -> int:
snake_case__ = set()
snake_case__ = 0
snake_case__ = n + 1 # maximum limit
for a in range(2 , __lowerCAmelCase ):
for b in range(2 , __lowerCAmelCase ):
snake_case__ = a**b # calculates the current power
collect_powers.add(__lowerCAmelCase ) # adds the result to the set
return len(__lowerCAmelCase )
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 33 | 1 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
lowerCamelCase__ : str = True
from torch.cuda.amp import autocast
lowerCamelCase__ : List[Any] = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
'''simple docstring'''
__lowercase : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
__lowercase : Optional[bool] = field(
default=snake_case_ ,metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
__lowercase : Optional[bool] = field(
default=snake_case_ ,metadata={'help': 'Whether to log verbose messages or not.'} ,)
__lowercase : Optional[float] = field(
default=2.0 ,metadata={'help': 'Maximum temperature for gumbel softmax.'} )
__lowercase : Optional[float] = field(
default=0.5 ,metadata={'help': 'Minimum temperature for gumbel softmax.'} )
__lowercase : Optional[float] = field(
default=0.99_99_95 ,metadata={'help': 'Decay of gumbel temperature during training.'} )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
snake_case__ = logging.WARNING
if model_args.verbose_logging:
snake_case__ = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
snake_case__ = logging.INFO
logger.setLevel(__lowerCAmelCase )
@dataclass
class __magic_name__ :
'''simple docstring'''
__lowercase : str = field(
default=snake_case_ ,metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(
default=snake_case_ ,metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowercase : Optional[str] = field(
default='train' ,metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} ,)
__lowercase : Optional[str] = field(
default='validation' ,metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} ,)
__lowercase : Optional[str] = field(
default='file' ,metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} ,)
__lowercase : bool = field(
default=snake_case_ ,metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
__lowercase : Optional[int] = field(
default=1 ,metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} ,)
__lowercase : Optional[int] = field(
default=snake_case_ ,metadata={'help': 'The number of processes to use for the preprocessing.'} ,)
__lowercase : Optional[float] = field(
default=20.0 ,metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class __magic_name__ :
'''simple docstring'''
__lowercase : WavaVecaForPreTraining
__lowercase : WavaVecaFeatureExtractor
__lowercase : Union[bool, str] = "longest"
__lowercase : Optional[int] = None
__lowercase : Optional[int] = None
def __call__( self:Optional[Any] , _a:List[Dict[str, Union[List[int], torch.Tensor]]] ):
# reformat list to dict and set to pytorch format
snake_case__ = self.feature_extractor.pad(
_a , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
snake_case__ = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
snake_case__ = batch['''input_values'''].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
snake_case__ = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
torch.long )
snake_case__ = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
snake_case__ = 1
snake_case__ = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
snake_case__ = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=_a , min_masks=2 , )
return batch
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:str , *_a:List[Any] , _a:Dict=1 , _a:List[str]=0 , _a:Union[str, Any]=1.0 , **_a:Optional[Any] ):
super().__init__(*_a , **_a )
snake_case__ = 0
snake_case__ = max_gumbel_temp
snake_case__ = min_gumbel_temp
snake_case__ = gumbel_temp_decay
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:nn.Module , _a:Dict[str, Union[torch.Tensor, Any]] ):
model.train()
snake_case__ = self._prepare_inputs(_a )
if self.use_amp:
with autocast():
snake_case__ = self.compute_loss(_a , _a )
else:
snake_case__ = self.compute_loss(_a , _a )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
snake_case__ = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
snake_case__ = loss.sum() / (inputs['''mask_time_indices''']).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
snake_case__ = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(_a ).backward()
elif self.use_apex:
with amp.scale_loss(_a , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(_a )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case__ , snake_case__ , snake_case__ = parser.parse_args_into_dataclasses()
configure_logger(__lowerCAmelCase , __lowerCAmelCase )
# Downloading and loading a dataset from the hub.
snake_case__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain"
snake_case__ = DatasetDict()
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain"
snake_case__ = DatasetDict()
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
snake_case__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
snake_case__ = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=__lowerCAmelCase )
def prepare_dataset(__lowerCAmelCase ):
# check that all files have the correct sampling rate
snake_case__ , snake_case__ = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
snake_case__ = datasets.map(
__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
# filter audio files that are too long
snake_case__ = vectorized_datasets.filter(
lambda __lowerCAmelCase : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(__lowerCAmelCase ):
return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
snake_case__ = vectorized_datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
snake_case__ = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
''' ``config.feat_extract_norm=\'layer\'''' )
snake_case__ = WavaVecaForPreTraining(__lowerCAmelCase )
snake_case__ = DataCollatorForWavaVecaPretraining(model=__lowerCAmelCase , feature_extractor=__lowerCAmelCase )
snake_case__ = WavaVecaPreTrainer(
model=__lowerCAmelCase , data_collator=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=__lowerCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 33 |
from copy import deepcopy
class __magic_name__ :
'''simple docstring'''
def __init__( self:int , _a:list[int] | None = None , _a:int | None = None ):
if arr is None and size is not None:
snake_case__ = size
snake_case__ = [0] * size
elif arr is not None:
self.init(_a )
else:
raise ValueError('''Either arr or size must be specified''' )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:list[int] ):
snake_case__ = len(_a )
snake_case__ = deepcopy(_a )
for i in range(1 , self.size ):
snake_case__ = self.next_(_a )
if j < self.size:
self.tree[j] += self.tree[i]
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
snake_case__ = self.next_(_a )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:int ):
return index + (index & (-index))
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:int ):
return index - (index & (-index))
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int , _a:int ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case__ = self.next_(_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
self.add(_a , value - self.get(_a ) )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int ):
if right == 0:
return 0
snake_case__ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case__ = self.prev(_a )
return result
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:int , _a:int ):
return self.prefix(_a ) - self.prefix(_a )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
return self.query(_a , index + 1 )
def SCREAMING_SNAKE_CASE__ ( self:str , _a:int ):
value -= self.tree[0]
if value < 0:
return -1
snake_case__ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case__ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.